1 /*
2  * resource.c
3  *
4  * Read uncompressed and compressed metadata and file resources from a WIM file.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013 Eric Biggers
9  *
10  * This file is part of wimlib, a library for working with WIM files.
11  *
12  * wimlib is free software; you can redistribute it and/or modify it under the
13  * terms of the GNU General Public License as published by the Free Software
14  * Foundation; either version 3 of the License, or (at your option) any later
15  * version.
16  *
17  * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19  * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License along with
22  * wimlib; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #include "wimlib.h"
30 #include "wimlib/endianness.h"
31 #include "wimlib/error.h"
32 #include "wimlib/file_io.h"
33 #include "wimlib/lookup_table.h"
34 #include "wimlib/resource.h"
35 #include "wimlib/sha1.h"
36
37 #ifdef __WIN32__
38 /* for read_win32_file_prefix(), read_win32_encrypted_file_prefix() */
39 #  include "wimlib/win32.h"
40 #endif
41
42 #ifdef WITH_NTFS_3G
43 /* for read_ntfs_file_prefix() */
44 #  include "wimlib/ntfs_3g.h"
45 #endif
46
47 #ifdef HAVE_ALLOCA_H
48 #  include <alloca.h>
49 #endif
50 #include <errno.h>
51 #include <fcntl.h>
52 #include <stdlib.h>
53 #include <unistd.h>
54
55 /*
56  *                         Compressed WIM resources
57  *
58  * A compressed resource in a WIM consists of a number of compressed chunks,
59  * each of which decompresses to a fixed chunk size (given in the WIM header;
60  * usually 32768) except possibly the last, which always decompresses to any
61  * remaining bytes.  In addition, immediately before the chunks, a table (the
62  * "chunk table") provides the offset, in bytes relative to the end of the chunk
63  * table, of the start of each compressed chunk, except for the first chunk
64  * which is omitted as it always has an offset of 0.  Therefore, a compressed
65  * resource with N chunks will have a chunk table with N - 1 entries.
66  *
67  * Additional information:
68  *
69  * - Entries in the chunk table are 4 bytes each, except if the uncompressed
70  *   size of the resource is greater than 4 GiB, in which case the entries in
71  *   the chunk table are 8 bytes each.  In either case, the entries are unsigned
72  *   little-endian integers.
73  *
74  * - The chunk table is included in the compressed size of the resource provided
75  *   in the corresponding entry in the WIM's stream lookup table.
76  *
77  * - The compressed size of a chunk is never greater than the uncompressed size.
78  *   From the compressor's point of view, chunks that would have compressed to a
79  *   size greater than or equal to their original size are in fact stored
 *   uncompressed.  From the decompressor's point of view, chunks with
 *   compressed size equal to their uncompressed size are in fact uncompressed.
82  *
83  * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
84  * structure of compressed resources was modified to allow piped reading and
85  * writing.  To make sequential writing possible, the chunk table is placed
86  * after the chunks rather than before the chunks, and to make sequential
87  * reading possible, each chunk is prefixed with a 4-byte header giving its
88  * compressed size as a 32-bit, unsigned, little-endian integer.  Otherwise the
89  * details are the same.
90  */
91
92
93 /* Decompress the specified chunk that uses the specified compression type
94  * @ctype, part of a WIM with default chunk size @wim_chunk_size.  For LZX the
95  * separate @wim_chunk_size is needed because it determines the window size used
96  * for LZX compression.  */
97 static int
98 decompress(const void *cchunk, unsigned clen, void *uchunk, unsigned ulen,
99            int ctype, u32 wim_chunk_size)
100 {
101         switch (ctype) {
102         case WIMLIB_COMPRESSION_TYPE_LZX:
103                 return wimlib_lzx_decompress2(cchunk, clen,
104                                               uchunk, ulen, wim_chunk_size);
105         case WIMLIB_COMPRESSION_TYPE_XPRESS:
106                 return wimlib_xpress_decompress(cchunk, clen,
107                                                 uchunk, ulen);
108         case WIMLIB_COMPRESSION_TYPE_LZMS:
109                 return wimlib_lzms_decompress(cchunk, clen, uchunk, ulen);
110         default:
111                 wimlib_assert(0);
112                 return -1;
113         }
114 }
115
/* A single contiguous range of an uncompressed resource's data, used to
 * request portions of the resource from read_compressed_wim_resource().  */
struct data_range {
        /* Byte offset of the start of the range within the uncompressed
         * resource data  */
        u64 offset;
        /* Number of bytes in the range (callers pass nonzero sizes; see the
         * sanity checks in read_compressed_wim_resource())  */
        u64 size;
};
120
/* Alternate chunk table format for resources with WIM_RESHDR_FLAG_CONCAT set.
 * This is an on-disk structure; all fields are little-endian.  */
struct alt_chunk_table_header_disk {
        /* Uncompressed size of the resource.  */
        le64 res_usize;

        /* Number of bytes each compressed chunk decompresses into, except
         * possibly the last which decompresses into the remainder.  */
        le32 chunk_size;

        /* Unknown field; meaning not determined.  Preserved as-is.  */
        le32 unknown;

        /* This header is directly followed by a table of compressed sizes of
         * the chunks (one 4-byte entry per chunk; see the chunk table parsing
         * in read_compressed_wim_resource()).  */
} _packed_attribute;
137
138 /*
139  * read_compressed_wim_resource() -
140  *
141  * Read data from a compressed WIM resource.
142  *
143  * @rspec
144  *      Specification of the compressed WIM resource to read from.
145  * @ranges
146  *      Nonoverlapping, nonempty ranges of the uncompressed resource data to
147  *      read, sorted by increasing offset.
148  * @num_ranges
149  *      Number of ranges in @ranges; must be at least 1.
150  * @cb
151  *      Callback function to feed the data being read.  Each call provides the
152  *      next chunk of the requested data.  Each chunk will be of nonzero size
153  *      and will not cross range boundaries, but otherwise is of unspecified
154  *      size.
 * @cb_ctx
 *      Parameter to pass to @cb.
157  * @raw_chunks_mode
158  *      If %true, this function will provide the raw compressed chunks of the
159  *      resource rather than the uncompressed data.  In this mode, only a single
160  *      data range can be requested, and it must cover the entire uncompressed
161  *      resource.
162  *
163  * Possible return values:
164  *
165  *      WIMLIB_ERR_SUCCESS (0)
166  *      WIMLIB_ERR_READ                   (errno set)
167  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
168  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
169  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
170  *
171  *      or other error code returned by the @cb function.
172  */
173 static int
174 read_compressed_wim_resource(const struct wim_resource_spec * const rspec,
175                              const struct data_range * const ranges,
176                              const size_t num_ranges,
177                              const consume_data_callback_t cb,
178                              void * const cb_ctx,
179                              const bool raw_chunks_mode)
180 {
181         int ret;
182         int errno_save;
183
184         u64 *chunk_offsets = NULL;
185         u8 *ubuf = NULL;
186         void *cbuf = NULL;
187         bool chunk_offsets_malloced = false;
188         bool ubuf_malloced = false;
189         bool cbuf_malloced = false;
190
191         /* Sanity checks  */
192         wimlib_assert(rspec != NULL);
193         wimlib_assert(rspec->ctype != WIMLIB_COMPRESSION_TYPE_NONE);
194         wimlib_assert(is_power_of_2(rspec->cchunk_size));
195         wimlib_assert(cb != NULL);
196         wimlib_assert(num_ranges != 0);
197         for (size_t i = 0; i < num_ranges; i++) {
198                 DEBUG("Range %zu/%zu: %"PRIu64"@+%"PRIu64" / %"PRIu64,
199                       i + 1, num_ranges, ranges[i].size, ranges[i].offset,
200                       rspec->uncompressed_size);
201                 wimlib_assert(ranges[i].size != 0);
202                 wimlib_assert(ranges[i].offset + ranges[i].size >= ranges[i].size);
203                 wimlib_assert(ranges[i].offset + ranges[i].size <= rspec->uncompressed_size);
204         }
205         for (size_t i = 0; i < num_ranges - 1; i++)
206                 wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
207
208         if (raw_chunks_mode) {
209                 wimlib_assert(num_ranges == 1);
210                 wimlib_assert(ranges[0].offset == 0);
211                 wimlib_assert(ranges[0].size == rspec->uncompressed_size);
212         }
213
214         /* Get the offsets of the first and last bytes of the read.  */
215         const u64 first_offset = ranges[0].offset;
216         const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;
217
218         /* Get the file descriptor for the WIM.  */
219         struct filedes * const in_fd = &rspec->wim->in_fd;
220
221         /* Determine if we're reading a pipable resource from a pipe or not.  */
222         const bool is_pipe_read = !filedes_is_seekable(in_fd);
223
224         /* Determine if the chunk table is in an altenate format.  */
225         const bool alt_chunk_table = (rspec->flags & WIM_RESHDR_FLAG_CONCAT) && !is_pipe_read;
226
227         /* Get the maximum size of uncompressed chunks in this resource, which
228          * we require be a power of 2.  */
229         u32 chunk_size;
230         u64 cur_read_offset = rspec->offset_in_wim;
231         if (alt_chunk_table) {
232                 /* Alternate chunk table format.  */
233                 struct alt_chunk_table_header_disk hdr;
234
235                 ret = full_pread(in_fd, &hdr, sizeof(hdr), cur_read_offset);
236                 if (ret)
237                         goto read_error;
238                 cur_read_offset += sizeof(hdr);
239
240                 chunk_size = le32_to_cpu(hdr.chunk_size);
241
242                 if (!is_power_of_2(chunk_size)) {
243                         ERROR("Invalid compressed resource: "
244                               "expected power-of-2 chunk size (got %u)", chunk_size);
245                         ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
246                         goto out_free_memory;
247                 }
248         } else {
249                 chunk_size = rspec->cchunk_size;
250         }
251         const u32 chunk_order = bsr32(chunk_size);
252
253         /* Calculate the total number of chunks the resource is divided into.  */
254         const u64 num_chunks = (rspec->uncompressed_size + chunk_size - 1) >> chunk_order;
255
256         /* Calculate the 0-based indices of the first and last chunks containing
257          * data that needs to be passed to the callback.  */
258         const u64 first_needed_chunk = first_offset >> chunk_order;
259         const u64 last_needed_chunk = last_offset >> chunk_order;
260
261         /* Calculate the 0-based index of the first chunk that actually needs to
262          * be read.  This is normally first_needed_chunk, but for pipe reads we
263          * must always start from the 0th chunk.  */
264         const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);
265
266         /* Calculate the number of chunk offsets that are needed for the chunks
267          * being read.  */
268         const u64 num_needed_chunk_offsets =
269                 last_needed_chunk - read_start_chunk + 1 +
270                 (last_needed_chunk < num_chunks - 1);
271
272         /* Calculate the number of entries in the chunk table.  Normally, it's
273          * one less than the number of chunks, since the first chunk has no
274          * entry.  But in the alternate chunk table format, the chunk entries
275          * contain chunk sizes, not offsets, and there is one per chunk.  */
276         const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);
277
278         /* Set the size of each chunk table entry based on the resource's
279          * uncompressed size.  XXX:  Does the alternate chunk table really
280          * always have 4-byte entries?  */
281         const u64 chunk_entry_size =
282                 (rspec->uncompressed_size > (1ULL << 32) && !alt_chunk_table)
283                         ? 8 : 4;
284
285         /* Calculate the size of the chunk table in bytes.  */
286         const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
287
288         /* Calculate the size of the chunk table in bytes, including the header
289          * in the case of the alternate chunk table format.  */
290         const u64 chunk_table_full_size =
291                 (alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
292                                   : chunk_table_size;
293
294         if (!is_pipe_read) {
295                 /* Read the needed chunk table entries into memory and use them
296                  * to initialize the chunk_offsets array.  */
297
298                 u64 first_chunk_entry_to_read;
299                 u64 last_chunk_entry_to_read;
300
301                 if (alt_chunk_table) {
302                         /* The alternate chunk table contains chunk sizes, not
303                          * offsets, so we always must read all preceding entries
304                          * in order to determine offsets.  */
305                         first_chunk_entry_to_read = 0;
306                         last_chunk_entry_to_read = last_needed_chunk;
307                 } else {
308                         /* Here we must account for the fact that the first
309                          * chunk has no explicit chunk table entry.  */
310
311                         if (read_start_chunk == 0)
312                                 first_chunk_entry_to_read = 0;
313                         else
314                                 first_chunk_entry_to_read = read_start_chunk - 1;
315
316                         if (last_needed_chunk == 0)
317                                 last_chunk_entry_to_read = 0;
318                         else
319                                 last_chunk_entry_to_read = last_needed_chunk - 1;
320
321                         if (last_needed_chunk < num_chunks - 1)
322                                 last_chunk_entry_to_read++;
323                 }
324
325                 const u64 num_chunk_entries_to_read =
326                         last_chunk_entry_to_read - first_chunk_entry_to_read + 1;
327
328                 const u64 chunk_offsets_alloc_size =
329                         max(num_chunk_entries_to_read,
330                             num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);
331
332                 if ((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)
333                         goto oom;
334
335                 if (chunk_offsets_alloc_size <= STACK_MAX) {
336                         chunk_offsets = alloca(chunk_offsets_alloc_size);
337                 } else {
338                         chunk_offsets = MALLOC(chunk_offsets_alloc_size);
339                         if (chunk_offsets == NULL)
340                                 goto oom;
341                         chunk_offsets_malloced = true;
342                 }
343
344                 const size_t chunk_table_size_to_read =
345                         num_chunk_entries_to_read * chunk_entry_size;
346
347                 const u64 file_offset_of_needed_chunk_entries =
348                         cur_read_offset
349                         + (first_chunk_entry_to_read * chunk_entry_size)
350                         + (rspec->is_pipable ? (rspec->size_in_wim - chunk_table_size) : 0);
351
352                 void * const chunk_table_data =
353                         (u8*)chunk_offsets +
354                         chunk_offsets_alloc_size -
355                         chunk_table_size_to_read;
356
357                 ret = full_pread(in_fd, chunk_table_data, chunk_table_size,
358                                  file_offset_of_needed_chunk_entries);
359                 if (ret)
360                         goto read_error;
361
362                 /* Now fill in chunk_offsets from the entries we have read in
363                  * chunk_tab_data.  We break aliasing rules here to avoid having
364                  * to allocate yet another array.  */
365                 typedef le64 __attribute__((may_alias)) aliased_le64_t;
366                 typedef le32 __attribute__((may_alias)) aliased_le32_t;
367                 u64 * chunk_offsets_p = chunk_offsets;
368
369                 if (alt_chunk_table) {
370                         u64 cur_offset = 0;
371                         aliased_le32_t *raw_entries = chunk_table_data;
372
373                         for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
374                                 u32 entry = le32_to_cpu(raw_entries[i]);
375                                 if (i >= read_start_chunk)
376                                         *chunk_offsets_p++ = cur_offset;
377                                 cur_offset += entry;
378                         }
379                         if (last_needed_chunk < num_chunks - 1)
380                                 *chunk_offsets_p = cur_offset;
381                 } else {
382                         if (read_start_chunk == 0)
383                                 *chunk_offsets_p++ = 0;
384
385                         if (chunk_entry_size == 4) {
386                                 aliased_le32_t *raw_entries = chunk_table_data;
387                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
388                                         *chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
389                         } else {
390                                 aliased_le64_t *raw_entries = chunk_table_data;
391                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
392                                         *chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
393                         }
394                 }
395
396                 /* Set offset to beginning of first chunk to read.  */
397                 cur_read_offset += chunk_offsets[0];
398                 if (rspec->is_pipable)
399                         cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
400                 else
401                         cur_read_offset += chunk_table_size;
402         }
403
404         /* Allocate buffer for holding the uncompressed data of each chunk.  */
405         if (chunk_size <= STACK_MAX) {
406                 ubuf = alloca(chunk_size);
407         } else {
408                 ubuf = MALLOC(chunk_size);
409                 if (ubuf == NULL)
410                         goto oom;
411                 ubuf_malloced = true;
412         }
413
414         /* Unless the raw compressed data was requested, allocate a temporary
415          * buffer for reading compressed chunks, each of which can be at most
416          * @chunk_size - 1 bytes.  This excludes compressed chunks that are a
417          * full @chunk_size bytes, which are actually stored uncompressed.  */
418         if (!raw_chunks_mode) {
419                 if (chunk_size - 1 <= STACK_MAX) {
420                         cbuf = alloca(chunk_size - 1);
421                 } else {
422                         cbuf = MALLOC(chunk_size - 1);
423                         if (cbuf == NULL)
424                                 goto oom;
425                         cbuf_malloced = true;
426                 }
427         }
428
429         /* Set current data range.  */
430         const struct data_range *cur_range = ranges;
431         const struct data_range * const end_range = &ranges[num_ranges];
432         u64 cur_range_pos = cur_range->offset;
433         u64 cur_range_end = cur_range->offset + cur_range->size;
434
435         /* Read and process each needed chunk.  */
436         for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {
437
438                 /* Calculate uncompressed size of next chunk.  */
439                 u32 chunk_usize;
440                 if ((i == num_chunks - 1) && (rspec->uncompressed_size & (chunk_size - 1)))
441                         chunk_usize = (rspec->uncompressed_size & (chunk_size - 1));
442                 else
443                         chunk_usize = chunk_size;
444
445                 /* Calculate compressed size of next chunk.  */
446                 u32 chunk_csize;
447                 if (is_pipe_read) {
448                         struct pwm_chunk_hdr chunk_hdr;
449
450                         ret = full_pread(in_fd, &chunk_hdr,
451                                          sizeof(chunk_hdr), cur_read_offset);
452                         if (ret)
453                                 goto read_error;
454                         chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
455                 } else {
456                         if (i == num_chunks - 1) {
457                                 chunk_csize = rspec->size_in_wim -
458                                               chunk_table_full_size -
459                                               chunk_offsets[i - read_start_chunk];
460                                 if (rspec->is_pipable)
461                                         chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
462                         } else {
463                                 chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
464                                               chunk_offsets[i - read_start_chunk];
465                         }
466                 }
467                 if (chunk_csize == 0 || chunk_csize > chunk_usize) {
468                         ERROR("Invalid chunk size in compressed resource!");
469                         errno = EINVAL;
470                         ret = WIMLIB_ERR_DECOMPRESSION;
471                         goto out_free_memory;
472                 }
473                 if (rspec->is_pipable)
474                         cur_read_offset += sizeof(struct pwm_chunk_hdr);
475
476                 /* Offsets in the uncompressed resource at which this chunk
477                  * starts and ends.  */
478                 const u64 chunk_start_offset = i << chunk_order;
479                 const u64 chunk_end_offset = chunk_start_offset + chunk_usize;
480
481                 if (chunk_end_offset <= cur_range_pos) {
482
483                         /* The next range does not require data in this chunk,
484                          * so skip it.  */
485                         cur_read_offset += chunk_csize;
486                         if (is_pipe_read) {
487                                 u8 dummy;
488
489                                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
490                                 if (ret)
491                                         goto read_error;
492                         }
493                 } else {
494
495                         /* Read the chunk and feed data to the callback
496                          * function.  */
497                         u8 *cb_buf;
498
499                         ret = full_pread(in_fd,
500                                          cbuf,
501                                          chunk_csize,
502                                          cur_read_offset);
503                         if (ret)
504                                 goto read_error;
505
506                         if (chunk_csize != chunk_usize && !raw_chunks_mode) {
507                                 DEBUG("Decompressing chunk %"PRIu64" "
508                                       "(csize=%"PRIu64" usize=%"PRIu64"",
509                                       i, chunk_csize, chunk_usize);
510                                 ret = decompress(cbuf,
511                                                  chunk_csize,
512                                                  ubuf,
513                                                  chunk_usize,
514                                                  rspec->ctype,
515                                                  chunk_size);
516                                 if (ret) {
517                                         ERROR("Failed to decompress data!");
518                                         ret = WIMLIB_ERR_DECOMPRESSION;
519                                         errno = EINVAL;
520                                         goto out_free_memory;
521                                 }
522                                 cb_buf = ubuf;
523                         } else {
524                                 /* Raw chunks requested, or data stored
525                                  * uncompressed.  */
526                                 cb_buf = cbuf;
527                         }
528                         cur_read_offset += chunk_csize;
529
530                         /* At least one range requires data in this chunk.  */
531                         do {
532                                 size_t start, end, size;
533
534                                 /* Calculate how many bytes of data should be
535                                  * sent to the callback function, taking into
536                                  * account that data sent to the callback
537                                  * function must not overlap range boundaries.
538                                  */
539                                 start = cur_range_pos - chunk_start_offset;
540                                 end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
541                                 size = end - start;
542
543                                 if (raw_chunks_mode)
544                                         ret = (*cb)(&cb_buf[0], chunk_csize, cb_ctx);
545                                 else
546                                         ret = (*cb)(&cb_buf[start], size, cb_ctx);
547
548                                 if (ret)
549                                         goto out_free_memory;
550
551                                 cur_range_pos += size;
552                                 if (cur_range_pos == cur_range_end) {
553                                         /* Advance to next range.  */
554                                         if (++cur_range == end_range) {
555                                                 cur_range_pos = ~0ULL;
556                                         } else {
557                                                 cur_range_pos = cur_range->offset;
558                                                 cur_range_end = cur_range->offset + cur_range->size;
559                                         }
560                                 }
561                         } while (cur_range_pos < chunk_end_offset);
562                 }
563         }
564
565         if (is_pipe_read &&
566             last_offset == rspec->uncompressed_size - 1 &&
567             chunk_table_size)
568         {
569                 u8 dummy;
570                 /* If reading a pipable resource from a pipe and the full data
571                  * was requested, skip the chunk table at the end so that the
572                  * file descriptor is fully clear of the resource after this
573                  * returns.  */
574                 cur_read_offset += chunk_table_size;
575                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
576                 if (ret)
577                         goto read_error;
578         }
579         ret = 0;
580 out_free_memory:
581         errno_save = errno;
582         if (chunk_offsets_malloced)
583                 FREE(chunk_offsets);
584         if (ubuf_malloced)
585                 FREE(ubuf);
586         if (cbuf_malloced)
587                 FREE(cbuf);
588         errno = errno_save;
589         return ret;
590
591 oom:
592         ERROR("Not enough memory available to read size=%"PRIu64" bytes "
593               "from compressed WIM resource!", last_offset - first_offset + 1);
594         errno = ENOMEM;
595         ret = WIMLIB_ERR_NOMEM;
596         goto out_free_memory;
597
598 read_error:
599         ERROR_WITH_ERRNO("Error reading compressed WIM resource!");
600         goto out_free_memory;
601 }
602
603 /* Read raw data from a file descriptor at the specified offset, feeding the
604  * data it in chunks into the specified callback function.  */
605 static int
606 read_raw_file_data(struct filedes *in_fd, u64 size,
607                    consume_data_callback_t cb, void *cb_ctx, u64 offset)
608 {
609         u8 buf[BUFFER_SIZE];
610         size_t bytes_to_read;
611         int ret;
612
613         while (size) {
614                 bytes_to_read = min(sizeof(buf), size);
615                 ret = full_pread(in_fd, buf, bytes_to_read, offset);
616                 if (ret) {
617                         ERROR_WITH_ERRNO("Read error");
618                         return ret;
619                 }
620                 ret = cb(buf, bytes_to_read, cb_ctx);
621                 if (ret)
622                         return ret;
623                 size -= bytes_to_read;
624                 offset += bytes_to_read;
625         }
626         return 0;
627 }
628
629 static int
630 bufferer_cb(const void *chunk, size_t size, void *_ctx)
631 {
632         u8 **buf_p = _ctx;
633
634         *buf_p = mempcpy(*buf_p, chunk, size);
635         return 0;
636 }
637
638 /*
639  * read_partial_wim_resource()-
640  *
641  * Read a range of data from an uncompressed or compressed resource in a WIM
642  * file.  Data is fed chunk-by-chunk into the callback function @cb, passing it
643  * the argument @cb_ctx.
644  *
 * By default, this function provides the uncompressed data of the resource, and
 * @offset and @size are interpreted relative to the uncompressed contents of
 * the resource.  This behavior can be modified by either of the following
 * flags:
649  *
650  * WIMLIB_READ_RESOURCE_FLAG_RAW_FULL:
651  *      Read @size bytes at @offset of the raw contents of the compressed
652  *      resource.  In the case of pipable resources, this excludes the stream
653  *      header.  Exclusive with WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS.
654  *
655  * WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS:
656  *      Read the raw compressed chunks of the compressed resource.  @size must
657  *      be the full uncompressed size, @offset must be 0, and @cb_chunk_size
658  *      must be the resource chunk size.
659  *
660  * Return values:
661  *      WIMLIB_ERR_SUCCESS (0)
662  *      WIMLIB_ERR_READ                   (errno set)
663  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
664  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
665  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
666  *
667  *      or other error code returned by the @cb function.
668  */
669 static int
670 read_partial_wim_resource(const struct wim_resource_spec *rspec,
671                           u64 offset, u64 size, consume_data_callback_t cb,
672                           void *cb_ctx, int flags)
673 {
674         /* Sanity checks.  */
675         if (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL) {
676                 wimlib_assert(!(flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS));
677                 wimlib_assert(offset + size >= offset);
678                 wimlib_assert(offset + size <= rspec->size_in_wim);
679         } else if (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS) {
680                 wimlib_assert(offset == 0);
681                 wimlib_assert(offset == rspec->uncompressed_size);
682         } else {
683                 wimlib_assert(offset + size >= offset);
684                 wimlib_assert(offset + size <= rspec->uncompressed_size);
685         }
686
687         DEBUG("Reading %"PRIu64" @ %"PRIu64" from WIM resource  "
688               "%"PRIu64" => %"PRIu64" @ %"PRIu64" (flags 0x%08x)",
689               size, offset, rspec->uncompressed_size,
690               rspec->size_in_wim, rspec->offset_in_wim, flags);
691
692         /* Trivial case.  */
693         if (size == 0)
694                 return 0;
695
696         if ((flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL) ||
697             rspec->ctype == WIMLIB_COMPRESSION_TYPE_NONE)
698         {
699                 return read_raw_file_data(&rspec->wim->in_fd,
700                                           size,
701                                           cb,
702                                           cb_ctx,
703                                           rspec->offset_in_wim + offset);
704         } else {
705                 bool raw_chunks = !!(flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS);
706                 struct data_range range = {
707                         .offset = offset,
708                         .size = size,
709                 };
710                 return read_compressed_wim_resource(rspec, &range, 1,
711                                                     cb, cb_ctx, raw_chunks);
712         }
713 }
714
715 int
716 read_partial_wim_stream_into_buf(const struct wim_lookup_table_entry *lte,
717                                  size_t size, u64 offset, void *_buf)
718 {
719         u8 *buf = _buf;
720
721         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
722
723         return read_partial_wim_resource(lte->rspec,
724                                          lte->offset_in_res + offset,
725                                          size,
726                                          bufferer_cb,
727                                          &buf,
728                                          0);
729 }
730
/* A consume_data_callback_t that simply discards the chunk it is given; used
 * when only the side effects of reading are wanted.  */
static int
skip_chunk_cb(const void *chunk, size_t size, void *_ctx)
{
	/* Intentionally do nothing with the data.  */
	return 0;
}
736
737 /* Skip over the data of the specified stream, which must correspond to a full
738  * WIM resource.  */
739 int
740 skip_wim_stream(struct wim_lookup_table_entry *lte)
741 {
742         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
743         wimlib_assert(!lte_is_partial(lte));
744         return read_partial_wim_resource(lte->rspec,
745                                          0,
746                                          lte->rspec->uncompressed_size,
747                                          skip_chunk_cb,
748                                          NULL,
749                                          WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS);
750 }
751
752 static int
753 read_wim_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
754                        consume_data_callback_t cb, void *cb_ctx, int flags)
755 {
756         return read_partial_wim_resource(lte->rspec,
757                                          lte->offset_in_res,
758                                          size,
759                                          cb,
760                                          cb_ctx,
761                                          flags);
762 }
763
764 #ifndef __WIN32__
765 /* This function handles reading stream data that is located in an external
766  * file,  such as a file that has been added to the WIM image through execution
767  * of a wimlib_add_command.
768  *
769  * This assumes the file can be accessed using the standard POSIX open(),
770  * read(), and close().  On Windows this will not necessarily be the case (since
771  * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
772  * encrypted), so Windows uses its own code for its equivalent case.  */
773 static int
774 read_file_on_disk_prefix(const struct wim_lookup_table_entry *lte, u64 size,
775                          consume_data_callback_t cb, void *cb_ctx,
776                          int _ignored_flags)
777 {
778         int ret;
779         int raw_fd;
780         struct filedes fd;
781
782         wimlib_assert(size <= lte->size);
783
784         DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"", size, lte->file_on_disk);
785
786         raw_fd = open(lte->file_on_disk, O_BINARY | O_RDONLY);
787         if (raw_fd < 0) {
788                 ERROR_WITH_ERRNO("Can't open \"%"TS"\"", lte->file_on_disk);
789                 return WIMLIB_ERR_OPEN;
790         }
791         filedes_init(&fd, raw_fd);
792         ret = read_raw_file_data(&fd, size, cb, cb_ctx, 0);
793         filedes_close(&fd);
794         return ret;
795 }
796 #endif /* !__WIN32__ */
797
798 /* This function handles the trivial case of reading stream data that is, in
799  * fact, already located in an in-memory buffer.  */
800 static int
801 read_buffer_prefix(const struct wim_lookup_table_entry *lte,
802                    u64 size, consume_data_callback_t cb,
803                    void *cb_ctx, int _ignored_flags)
804 {
805         wimlib_assert(size <= lte->size);
806         return (*cb)(lte->attached_buffer, size, cb_ctx);
807 }
808
/* Signature shared by all per-location stream readers dispatched below.  */
typedef int (*read_stream_prefix_handler_t)(const struct wim_lookup_table_entry *lte,
					    u64 size,
					    consume_data_callback_t cb,
					    void *cb_ctx, int flags);

/*
 * read_stream_prefix()-
 *
 * Reads the first @size bytes from a generic "stream", which may be located in
 * any one of several locations, such as in a WIM file (compressed or
 * uncompressed), in an external file, or directly in an in-memory buffer.
 *
 * This function feeds the data to a callback function @cb.
 *
 * If the stream is located in a WIM file, @flags can be set as documented in
 * read_partial_wim_resource().  Otherwise @flags are ignored.
 *
 * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
 * the stream data cannot be successfully read (for a number of different
 * reasons, depending on the stream location), or if @cb returned nonzero in
 * which case that error code will be returned.
 */
int
read_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
		   consume_data_callback_t cb, void *cb_ctx, int flags)
{
	/* Dispatch table indexed by resource_location.  Entries are compiled
	 * in only when the corresponding feature/platform is enabled, so
	 * unsupported locations leave NULL holes (caught by the assertion
	 * below).  */
	static const read_stream_prefix_handler_t handlers[] = {
		[RESOURCE_IN_WIM]             = read_wim_stream_prefix,
	#ifdef __WIN32__
		[RESOURCE_IN_FILE_ON_DISK]    = read_win32_file_prefix,
	#else
		[RESOURCE_IN_FILE_ON_DISK]    = read_file_on_disk_prefix,
	#endif
		[RESOURCE_IN_ATTACHED_BUFFER] = read_buffer_prefix,
	#ifdef WITH_FUSE
		[RESOURCE_IN_STAGING_FILE]    = read_file_on_disk_prefix,
	#endif
	#ifdef WITH_NTFS_3G
		[RESOURCE_IN_NTFS_VOLUME]     = read_ntfs_file_prefix,
	#endif
	#ifdef __WIN32__
		[RESOURCE_WIN32_ENCRYPTED]    = read_win32_encrypted_file_prefix,
	#endif
	};
	/* Fail loudly on an out-of-range or unsupported resource location.  */
	wimlib_assert(lte->resource_location < ARRAY_LEN(handlers)
		      && handlers[lte->resource_location] != NULL);
	return handlers[lte->resource_location](lte, size, cb, cb_ctx, flags);
}
857
858 /* Read the full uncompressed data of the specified stream into the specified
859  * buffer, which must have space for at least lte->size bytes.  */
860 int
861 read_full_stream_into_buf(const struct wim_lookup_table_entry *lte, void *_buf)
862 {
863         u8 *buf = _buf;
864         return read_stream_prefix(lte, lte->size, bufferer_cb, &buf, 0);
865 }
866
867 /* Read the full uncompressed data of the specified stream.  A buffer sufficient
868  * to hold the data is allocated and returned in @buf_ret.  */
869 int
870 read_full_stream_into_alloc_buf(const struct wim_lookup_table_entry *lte,
871                                 void **buf_ret)
872 {
873         int ret;
874         void *buf;
875
876         if ((size_t)lte->size != lte->size) {
877                 ERROR("Can't read %"PRIu64" byte stream into "
878                       "memory", lte->size);
879                 return WIMLIB_ERR_NOMEM;
880         }
881
882         buf = MALLOC(lte->size);
883         if (buf == NULL)
884                 return WIMLIB_ERR_NOMEM;
885
886         ret = read_full_stream_into_buf(lte, buf);
887         if (ret) {
888                 FREE(buf);
889                 return ret;
890         }
891
892         *buf_ret = buf;
893         return 0;
894 }
895
896 /* Retrieve the full uncompressed data of the specified WIM resource.  */
897 static int
898 wim_resource_spec_to_data(struct wim_resource_spec *rspec, void **buf_ret)
899 {
900         int ret;
901         struct wim_lookup_table_entry *lte;
902
903         lte = new_lookup_table_entry();
904         if (lte == NULL)
905                 return WIMLIB_ERR_NOMEM;
906
907         lte->unhashed = 1;
908         lte_bind_wim_resource_spec(lte, rspec);
909         lte->flags = rspec->flags;
910         lte->size = rspec->uncompressed_size;
911         lte->offset_in_res = 0;
912
913         ret = read_full_stream_into_alloc_buf(lte, buf_ret);
914
915         lte_unbind_wim_resource_spec(lte);
916         free_lookup_table_entry(lte);
917         return ret;
918 }
919
920 /* Retrieve the full uncompressed data of the specified WIM resource.  */
921 int
922 wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim, void **buf_ret)
923 {
924         DEBUG("offset_in_wim=%"PRIu64", size_in_wim=%"PRIu64", "
925               "uncompressed_size=%"PRIu64,
926               reshdr->offset_in_wim, reshdr->size_in_wim, reshdr->uncompressed_size);
927
928         struct wim_resource_spec rspec;
929         wim_res_hdr_to_spec(reshdr, wim, &rspec);
930         return wim_resource_spec_to_data(&rspec, buf_ret);
931 }
932
struct streamifier_context {
	/* Callbacks to invoke as each stream begins, receives data, ends.  */
	struct read_stream_list_callbacks cbs;
	/* Stream currently receiving data, or NULL once the final stream has
	 * been completed.  */
	struct wim_lookup_table_entry *cur_stream;
	/* Number of bytes of @cur_stream fed to the callbacks so far.  */
	u64 cur_stream_offset;
	/* Last stream in the list being read; used to detect completion.  */
	struct wim_lookup_table_entry *final_stream;
	/* Offset of the `struct list_head' within each lookup table entry;
	 * used to step to the next stream in the list.  */
	size_t list_head_offset;
};
940
941 /* Callback for translating raw resource data into streams.  */
942 static int
943 streamifier_cb(const void *chunk, size_t size, void *_ctx)
944 {
945         struct streamifier_context *ctx = _ctx;
946         int ret;
947
948         DEBUG("%zu bytes passed to streamifier", size);
949
950         wimlib_assert(ctx->cur_stream != NULL);
951
952         if (ctx->cur_stream_offset == 0) {
953                 /* Starting a new stream.  */
954                 DEBUG("Begin new stream (size=%"PRIu64").", ctx->cur_stream->size);
955                 ret = (*ctx->cbs.begin_stream)(ctx->cur_stream, true, ctx->cbs.begin_stream_ctx);
956                 if (ret)
957                         return ret;
958         }
959
960         /* Consume the chunk.  */
961         ret = (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
962         if (ret)
963                 return ret;
964         ctx->cur_stream_offset += size;
965
966         if (ctx->cur_stream_offset == ctx->cur_stream->size) {
967                 /* Finished reading all the data for a stream; advance to the
968                  * next one.  */
969                 DEBUG("End stream (size=%"PRIu64").", ctx->cur_stream->size);
970                 ret = (*ctx->cbs.end_stream)(ctx->cur_stream, 0, ctx->cbs.end_stream_ctx);
971                 if (ret)
972                         return ret;
973
974                 if (ctx->cur_stream != ctx->final_stream) {
975                         struct list_head *cur = (struct list_head *)
976                                         ((u8*)ctx->cur_stream + ctx->list_head_offset);
977                         struct list_head *next = cur->next;
978
979                         ctx->cur_stream = (struct wim_lookup_table_entry *)
980                                         ((u8*)next - ctx->list_head_offset);
981
982                         ctx->cur_stream_offset = 0;
983                 } else {
984                         ctx->cur_stream = NULL;
985                 }
986         }
987         return 0;
988 }
989
struct hasher_context {
	/* Running SHA1 state for the stream currently being read.  */
	SHA_CTX sha_ctx;
	/* Chained callbacks to forward events to; each member may be NULL.  */
	struct read_stream_list_callbacks cbs;
};
994
995 /* Callback for starting to read a stream while calculating its SHA1 message
996  * digest.  */
997 static int
998 hasher_begin_stream(struct wim_lookup_table_entry *lte, bool is_partial_res,
999                     void *_ctx)
1000 {
1001         struct hasher_context *ctx = _ctx;
1002
1003         sha1_init(&ctx->sha_ctx);
1004
1005         if (ctx->cbs.begin_stream == NULL)
1006                 return 0;
1007         else
1008                 return (*ctx->cbs.begin_stream)(lte, is_partial_res,
1009                                                 ctx->cbs.begin_stream_ctx);
1010 }
1011
1012 /* Callback for continuing to read a stream while calculating its SHA1 message
1013  * digest.  */
1014 static int
1015 hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
1016 {
1017         struct hasher_context *ctx = _ctx;
1018
1019         sha1_update(&ctx->sha_ctx, chunk, size);
1020         if (ctx->cbs.consume_chunk == NULL)
1021                 return 0;
1022         else
1023                 return (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
1024 }
1025
1026 /* Callback for finishing reading a stream while calculating its SHA1 message
1027  * digest.  */
1028 static int
1029 hasher_end_stream(struct wim_lookup_table_entry *lte, int status, void *_ctx)
1030 {
1031         struct hasher_context *ctx = _ctx;
1032         u8 hash[SHA1_HASH_SIZE];
1033         int ret;
1034
1035         if (status) {
1036                 ret = status;
1037                 goto out_next_cb;
1038         }
1039
1040         sha1_final(hash, &ctx->sha_ctx);
1041
1042         if (lte->unhashed) {
1043                 /* No SHA1 message digest was present before; fill it in with
1044                  * the calculated value.  */
1045                 DEBUG("Set SHA1 message digest for stream (size=%"PRIu64").", lte->size);
1046                 copy_hash(lte->hash, hash);
1047         } else {
1048                 /* A SHA1 message digest was present before.  Verify that it is
1049                  * the same as the calculated value.  */
1050                 if (!hashes_equal(hash, lte->hash)) {
1051                         if (wimlib_print_errors) {
1052                                 ERROR("Invalid SHA1 message digest "
1053                                       "on the following WIM stream:");
1054                                 print_lookup_table_entry(lte, stderr);
1055                                 if (lte->resource_location == RESOURCE_IN_WIM)
1056                                         ERROR("The WIM file appears to be corrupt!");
1057                         }
1058                         ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
1059                         errno = EINVAL;
1060                         goto out_next_cb;
1061                 }
1062                 DEBUG("SHA1 message digest okay for stream (size=%"PRIu64").", lte->size);
1063         }
1064         ret = 0;
1065 out_next_cb:
1066         if (ctx->cbs.end_stream == NULL)
1067                 return ret;
1068         else
1069                 return (*ctx->cbs.end_stream)(lte, ret, ctx->cbs.end_stream_ctx);
1070 }
1071
1072 /* Read the full data of the stream @lte, passing the data into the specified
1073  * callbacks (all of which are optional) and either checking or computing the
1074  * SHA1 message digest of the stream.  */
1075 static int
1076 read_full_stream_with_sha1(struct wim_lookup_table_entry *lte,
1077                            const struct read_stream_list_callbacks *cbs)
1078 {
1079         int ret;
1080
1081         struct hasher_context hasher_ctx = {
1082                 .cbs = *cbs,
1083         };
1084
1085         ret = hasher_begin_stream(lte, false, &hasher_ctx);
1086         if (ret)
1087                 return ret;
1088
1089         ret = read_stream_prefix(lte, lte->size, hasher_consume_chunk,
1090                                  &hasher_ctx, 0);
1091
1092         return hasher_end_stream(lte, ret, &hasher_ctx);
1093 }
1094
struct rechunkifier_context {
	/* Staging buffer of @cb_chunk_size bytes, allocated by the caller.  */
	u8 *buffer;
	/* Number of bytes currently in @buffer, not yet flushed to @cb.  */
	u32 buffer_filled;
	/* Chunk size to deliver to @cb (last chunk of a range may be less).  */
	u32 cb_chunk_size;

	/* The data ranges (streams) making up the input, consumed in order.  */
	const struct data_range *ranges;
	size_t num_ranges;
	/* Index of the range currently being processed.  */
	size_t cur_range;
	/* Bytes of the current range not yet copied into @buffer.  */
	u64 range_bytes_remaining;

	/* Downstream callback (and its context) receiving the re-sized chunks.  */
	consume_data_callback_t cb;
	void *cb_ctx;
};
1108
1109 /* Wrapper callback for adjusting the data chunk size.  */
1110 static int
1111 rechunkifier_cb(const void *chunk, size_t size, void *_ctx)
1112 {
1113         struct rechunkifier_context *ctx = _ctx;
1114         const u8 *chunkptr = chunk;
1115         size_t bytes_to_copy;
1116         int ret;
1117
1118         wimlib_assert(ctx->cur_range != ctx->num_ranges);
1119
1120         while (size) {
1121
1122                 /* Append more data to the buffer.  */
1123                 bytes_to_copy = size;
1124
1125                 if (bytes_to_copy > ctx->cb_chunk_size - ctx->buffer_filled)
1126                         bytes_to_copy = ctx->cb_chunk_size - ctx->buffer_filled;
1127
1128                 if (bytes_to_copy > ctx->range_bytes_remaining - ctx->buffer_filled)
1129                         bytes_to_copy = ctx->range_bytes_remaining - ctx->buffer_filled;
1130
1131                 memcpy(&ctx->buffer[ctx->buffer_filled], chunkptr, bytes_to_copy);
1132
1133                 ctx->buffer_filled += bytes_to_copy;
1134                 chunkptr += bytes_to_copy;
1135                 size -= bytes_to_copy;
1136                 ctx->range_bytes_remaining -= bytes_to_copy;
1137
1138                 if (ctx->buffer_filled == ctx->cb_chunk_size ||
1139                     ctx->range_bytes_remaining == 0)
1140                 {
1141                         /* Maximum chunk size reached, or current range ended.
1142                          * Call the next consume_data_callback_t and empty the
1143                          * buffer  */
1144                         ret = (*ctx->cb)(ctx->buffer, ctx->buffer_filled, ctx->cb_ctx);
1145                         if (ret)
1146                                 return ret;
1147                         ctx->buffer_filled = 0;
1148
1149                         if (ctx->range_bytes_remaining == 0 &&
1150                             ++ctx->cur_range != ctx->num_ranges)
1151                                 ctx->range_bytes_remaining = ctx->ranges[ctx->cur_range].size;
1152                 }
1153         }
1154         return 0;
1155 }
1156
1157 /*
1158  * Read a list of streams, each of which may be in any supported location (e.g.
1159  * in a WIM or in an external file).  Unlike read_stream_prefix() or the
1160  * functions which call it, this function optimizes the case where multiple
1161  * streams are packed into a single compressed WIM resource and reads them all
1162  * consecutively, only decompressing the data one time.
1163  *
1164  * @stream_list
1165  *      List of streams (represented as `struct wim_lookup_table_entry's) to
1166  *      read.
1167  * @list_head_offset
1168  *      Offset of the `struct list_head' within each `struct
1169  *      wim_lookup_table_entry' that makes up the @stream_list.
1170  * @cb_chunk_size
1171  *      Size of chunks to provide to @consume_chunk.  For a given stream, all
1172  *      the chunks will be this size, except possibly the last which will be the
 *      remainder.  If @cb_chunk_size is 0, chunks are passed through to
 *      @cbs->consume_chunk at whatever sizes they are produced.
 * @cbs
 *      Callbacks to invoke when a stream begins, when a chunk of its data is
 *      available, and when it ends.  Each member may be NULL.
1175  *
1176  * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
1177  * to an error reading the data or due to an error status being returned by any
1178  * of the callback functions.
1179  */
int
read_stream_list(struct list_head *stream_list,
		 size_t list_head_offset,
		 u32 cb_chunk_size,
		 const struct read_stream_list_callbacks *cbs)
{
	int ret;
	struct list_head *cur, *next;
	struct wim_lookup_table_entry *lte;

	/* Sort by offset in the WIM so that streams sharing a compressed
	 * resource become adjacent in the list.  */
	ret = sort_stream_list_by_sequential_order(stream_list, list_head_offset);
	if (ret)
		return ret;

	for (cur = stream_list->next, next = cur->next;
	     cur != stream_list;
	     cur = next, next = cur->next)
	{
		/* Recover the entry from its embedded list link.  */
		lte = (struct wim_lookup_table_entry*)((u8*)cur - list_head_offset);

		if (lte_is_partial(lte)) {

			struct wim_lookup_table_entry *lte_next, *lte_last;
			struct list_head *next2;
			size_t stream_count;

			/* The next stream is a proper sub-sequence of a WIM
			 * resource.  See if there are other streams in the same
			 * resource that need to be read.  Since
			 * sort_stream_list_by_sequential_order() sorted the
			 * streams by offset in the WIM, this can be determined
			 * by simply scanning forward in the list.  */

			lte_last = lte;
			stream_count = 1;
			for (next2 = next;
			     next2 != stream_list
			     && (lte_next = (struct wim_lookup_table_entry*)
						((u8*)next2 - list_head_offset),
				 lte_next->resource_location == RESOURCE_IN_WIM
				 && lte_next->rspec == lte->rspec);
			     next2 = next2->next)
			{
				lte_last = lte_next;
				stream_count++;
			}
			if (stream_count > 1) {
				/* Reading multiple streams combined into a
				 * single WIM resource.  They are in the stream
				 * list, sorted by offset; @lte specifies the
				 * first stream in the resource that needs to be
				 * read and @lte_last specifies the last stream
				 * in the resource that needs to be read.  */

				DEBUG("Reading %zu streams combined in same "
				      "WIM resource", stream_count);

				/* Skip the outer loop past the streams handled
				 * here.  */
				next = next2;

				/* One data range per stream in this resource
				 * (variable-length array).  */
				struct data_range ranges[stream_count];

				{
					struct list_head *next3;
					size_t i;
					struct wim_lookup_table_entry *lte_cur;

					next3 = cur;
					for (i = 0; i < stream_count; i++) {
						lte_cur = (struct wim_lookup_table_entry*)
							((u8*)next3 - list_head_offset);
						ranges[i].offset = lte_cur->offset_in_res;
						ranges[i].size = lte_cur->size;
						next3 = next3->next;
					}
				}

				/* Set up a chain of callbacks.
				 *
				 * The first level is the
				 * streamifier_cb,
				 * which takes in chunks of data and divides
				 * them into the constituent streams.
				 *
				 * The second level are the SHA1 message digest
				 * callbacks, which checksum each stream.
				 *
				 * rechunkifier_cb handles dividing the read
				 * data into chunks of maximum size
				 * @cb_chunk_size.  If @cb_chunk_size is 0, then
				 * this callback is not needed.
				 *
				 * Finally, the last level of callbacks are
				 * @cbs, passed as arguments to this function.
				 */

				struct rechunkifier_context *rechunkifier_ctx = NULL;
				consume_data_callback_t last_cb;
				void *last_cb_ctx;

				if (cb_chunk_size != 0) {
					/* Context lives on the stack (alloca);
					 * only its data buffer is heap-allocated
					 * and must be freed below.  */
					rechunkifier_ctx = alloca(sizeof(*rechunkifier_ctx));
					*rechunkifier_ctx = (struct rechunkifier_context) {
						.buffer			= MALLOC(cb_chunk_size),
						.buffer_filled		= 0,
						.cb_chunk_size		= cb_chunk_size,
						.ranges			= ranges,
						.num_ranges		= stream_count,
						.cur_range		= 0,
						.range_bytes_remaining	= ranges[0].size,
						.cb			= cbs->consume_chunk,
						.cb_ctx			= cbs->consume_chunk_ctx,
					};

					if (rechunkifier_ctx->buffer == NULL)
						return WIMLIB_ERR_NOMEM;
					last_cb = rechunkifier_cb;
					last_cb_ctx = rechunkifier_ctx;
				} else {
					rechunkifier_ctx = NULL;
					last_cb = cbs->consume_chunk;
					last_cb_ctx = cbs->consume_chunk_ctx;
				}

				struct hasher_context hasher_ctx = {
					.cbs = {
						.begin_stream		= cbs->begin_stream,
						.begin_stream_ctx	= cbs->begin_stream_ctx,
						.consume_chunk		= last_cb,
						.consume_chunk_ctx	= last_cb_ctx,
						.end_stream		= cbs->end_stream,
						.end_stream_ctx		= cbs->end_stream_ctx,
					},
				};

				struct streamifier_context streamifier_ctx = {
					.cbs = {
						.begin_stream		= hasher_begin_stream,
						.begin_stream_ctx	= &hasher_ctx,
						.consume_chunk		= hasher_consume_chunk,
						.consume_chunk_ctx	= &hasher_ctx,
						.end_stream		= hasher_end_stream,
						.end_stream_ctx		= &hasher_ctx,
					},
					.cur_stream		= lte,
					.cur_stream_offset	= 0,
					.final_stream		= lte_last,
					.list_head_offset	= list_head_offset,
				};

				ret = read_compressed_wim_resource(lte->rspec,
								   ranges,
								   stream_count,
								   streamifier_cb,
								   &streamifier_ctx,
								   false);
				if (rechunkifier_ctx != NULL)
					FREE(rechunkifier_ctx->buffer);

				if (ret) {
					/* If a stream was partially read when
					 * the error occurred, report the error
					 * through its end_stream callback.  */
					if (streamifier_ctx.cur_stream_offset != 0) {
						ret = (*streamifier_ctx.cbs.end_stream)
							(streamifier_ctx.cur_stream,
							 ret,
							 streamifier_ctx.cbs.end_stream_ctx);
					}
					return ret;
				}
				continue;
			}
		}

		/* Single stream (full resource, or the only stream needed
		 * from its resource): read it with SHA1 checking.  */
		ret = read_full_stream_with_sha1(lte, cbs);
		/* NOTE(review): only strictly positive values propagate as
		 * errors here — confirm whether negative callback returns are
		 * intentionally treated as non-fatal.  */
		if (ret > 0)
			return ret;
	}
	return 0;
}
1357
1358 /* Extracts the first @size bytes of a stream to somewhere.  In the process, the
1359  * SHA1 message digest of the uncompressed stream is checked if the full stream
1360  * is being extracted.
1361  *
1362  * @extract_chunk is the callback to extract each chunk of the stream.  */
1363 int
1364 extract_stream(struct wim_lookup_table_entry *lte, u64 size,
1365                consume_data_callback_t extract_chunk, void *extract_chunk_arg)
1366 {
1367         if (size == lte->size) {
1368                 /* Do SHA1.  */
1369                 struct read_stream_list_callbacks cbs = {
1370                         .consume_chunk          = extract_chunk,
1371                         .consume_chunk_ctx      = extract_chunk_arg,
1372                 };
1373                 return read_full_stream_with_sha1(lte, &cbs);
1374         } else {
1375                 /* Don't do SHA1.  */
1376                 return read_stream_prefix(lte, size, extract_chunk,
1377                                           extract_chunk_arg, 0);
1378         }
1379 }
1380
1381 /* Write a chunk of data to a file descriptor.  This function can be passed as a
1382  * consume_data_callback_t.  */
1383 int
1384 extract_chunk_to_fd(const void *chunk, size_t size, void *_fd_p)
1385 {
1386         struct filedes *fd = _fd_p;
1387
1388         int ret = full_write(fd, chunk, size);
1389         if (ret) {
1390                 ERROR_WITH_ERRNO("Error writing to file descriptor");
1391                 return ret;
1392         }
1393
1394         return 0;
1395 }
1396
1397 /* Extract the first @size bytes of the specified stream to the specified file
1398  * descriptor.  If @size is the full size of the stream, its SHA1 message digest
1399  * is also checked.  */
1400 int
1401 extract_stream_to_fd(struct wim_lookup_table_entry *lte,
1402                      struct filedes *fd, u64 size)
1403 {
1404         return extract_stream(lte, size, extract_chunk_to_fd, fd);
1405 }
1406
1407 /* Calculate the SHA1 message digest of a stream, storing it in @lte->hash.  */
1408 int
1409 sha1_stream(struct wim_lookup_table_entry *lte)
1410 {
1411         struct read_stream_list_callbacks cbs = {
1412         };
1413         return read_full_stream_with_sha1(lte, &cbs);
1414 }
1415
1416 /* Convert a WIM resource header to a stand-alone resource specification.  */
1417 void
1418 wim_res_hdr_to_spec(const struct wim_reshdr *reshdr, WIMStruct *wim,
1419                     struct wim_resource_spec *spec)
1420 {
1421         spec->wim = wim;
1422         spec->offset_in_wim = reshdr->offset_in_wim;
1423         spec->size_in_wim = reshdr->size_in_wim;
1424         spec->uncompressed_size = reshdr->uncompressed_size;
1425         INIT_LIST_HEAD(&spec->lte_list);
1426         spec->flags = reshdr->flags;
1427         spec->is_pipable = wim_is_pipable(wim);
1428         if (spec->flags & (WIM_RESHDR_FLAG_COMPRESSED | WIM_RESHDR_FLAG_CONCAT)) {
1429                 spec->ctype = wim->compression_type;
1430                 spec->cchunk_size = wim->chunk_size;
1431         } else {
1432                 spec->ctype = WIMLIB_COMPRESSION_TYPE_NONE;
1433                 spec->cchunk_size = 0;
1434         }
1435 }
1436
1437 /* Convert a stand-alone resource specification to a WIM resource header.  */
1438 void
1439 wim_res_spec_to_hdr(const struct wim_resource_spec *rspec,
1440                     struct wim_reshdr *reshdr)
1441 {
1442         reshdr->offset_in_wim     = rspec->offset_in_wim;
1443         reshdr->size_in_wim       = rspec->size_in_wim;
1444         reshdr->flags             = rspec->flags;
1445         reshdr->uncompressed_size = rspec->uncompressed_size;
1446 }
1447
1448 /* Translates a WIM resource header from the on-disk format into an in-memory
1449  * format.  */
1450 int
1451 get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
1452                struct wim_reshdr *reshdr)
1453 {
1454         reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
1455         reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
1456                               ((u64)disk_reshdr->size_in_wim[1] <<  8) |
1457                               ((u64)disk_reshdr->size_in_wim[2] << 16) |
1458                               ((u64)disk_reshdr->size_in_wim[3] << 24) |
1459                               ((u64)disk_reshdr->size_in_wim[4] << 32) |
1460                               ((u64)disk_reshdr->size_in_wim[5] << 40) |
1461                               ((u64)disk_reshdr->size_in_wim[6] << 48));
1462         reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
1463         reshdr->flags = disk_reshdr->flags;
1464
1465         /* Avoid possible overflows.  */
1466         if (reshdr->offset_in_wim & 0xc000000000000000ULL)
1467                 return WIMLIB_ERR_INVALID_LOOKUP_TABLE_ENTRY;
1468
1469         if (reshdr->uncompressed_size & 0xc000000000000000ULL)
1470                 return WIMLIB_ERR_INVALID_LOOKUP_TABLE_ENTRY;
1471
1472         return 0;
1473 }
1474
1475 /* Translates a WIM resource header from an in-memory format into the on-disk
1476  * format.  */
1477 void
1478 put_wim_reshdr(const struct wim_reshdr *reshdr,
1479                struct wim_reshdr_disk *disk_reshdr)
1480 {
1481         disk_reshdr->size_in_wim[0] = reshdr->size_in_wim  >>  0;
1482         disk_reshdr->size_in_wim[1] = reshdr->size_in_wim  >>  8;
1483         disk_reshdr->size_in_wim[2] = reshdr->size_in_wim  >> 16;
1484         disk_reshdr->size_in_wim[3] = reshdr->size_in_wim  >> 24;
1485         disk_reshdr->size_in_wim[4] = reshdr->size_in_wim  >> 32;
1486         disk_reshdr->size_in_wim[5] = reshdr->size_in_wim  >> 40;
1487         disk_reshdr->size_in_wim[6] = reshdr->size_in_wim  >> 48;
1488         disk_reshdr->flags = reshdr->flags;
1489         disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
1490         disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
1491 }