Diffstat (limited to 'gnu')
-rw-r--r--  gnu/packages/audio.scm                                         |  74
-rw-r--r--  gnu/packages/patches/webrtc-audio-processing-big-endian.patch  | 384
2 files changed, 342 insertions, 116 deletions
diff --git a/gnu/packages/audio.scm b/gnu/packages/audio.scm
index bfe897ba23..f9da325377 100644
--- a/gnu/packages/audio.scm
+++ b/gnu/packages/audio.scm
@@ -305,55 +305,43 @@ displays a histogram of the roundtrip time jitter.")
(define-public webrtc-audio-processing
(package
(name "webrtc-audio-processing")
- (version "0.3.1")
+ (version "1.0")
(source
(origin
(method url-fetch)
(uri
(string-append "http://freedesktop.org/software/pulseaudio/"
- name "/" name "-" version ".tar.xz"))
+ name "/" name "-" version ".tar.gz"))
(sha256
- (base32 "1gsx7k77blfy171b6g3m0k0s0072v6jcawhmx1kjs9w5zlwdkzd0"))))
- (build-system gnu-build-system)
- (arguments
- ;; TODO: Move this to a snippet/patch or remove with the upgrade to 1.0.
- (if (or (target-riscv64?)
- (target-powerpc?))
- (list
- #:phases
- #~(modify-phases %standard-phases
- (add-after 'unpack 'patch-source
- (lambda* (#:key inputs #:allow-other-keys)
- (let ((patch-file
- #$(local-file
- (search-patch
- "webrtc-audio-processing-big-endian.patch"))))
- (invoke "patch" "--force" "-p1" "-i" patch-file)
- (substitute* "webrtc/typedefs.h"
- (("defined\\(__aarch64__\\)" all)
- (string-append
- ;; powerpc-linux
- "(defined(__PPC__) && __SIZEOF_SIZE_T__ == 4)\n"
- "#define WEBRTC_ARCH_32_BITS\n"
- "#define WEBRTC_ARCH_BIG_ENDIAN\n"
- ;; powerpc64-linux
- "#elif (defined(__PPC64__) && defined(_BIG_ENDIAN))\n"
- "#define WEBRTC_ARCH_64_BITS\n"
- "#define WEBRTC_ARCH_BIG_ENDIAN\n"
- ;; aarch64-linux
- "#elif " all
- ;; riscv64-linux
- " || (defined(__riscv) && __riscv_xlen == 64)"
- ;; powerpc64le-linux
- " || (defined(__PPC64__) && defined(_LITTLE_ENDIAN))"))))))))
- '()))
- (native-inputs
- (if (or (target-riscv64?)
- (target-powerpc?))
- (list
- (local-file (search-patch "webrtc-audio-processing-big-endian.patch"))
- patch)
- '()))
+ (base32 "0vwkw5xw8l37f5vbzbkipjsf03r7b8nnrfbfbhab8bkvf79306j4"))
+ (modules '((guix build utils)))
+ (snippet
+ #~(begin
+ ;; See:
+ ;; <https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing/-/issues/4>.
+ (substitute* "meson.build"
+ (("absl_flags_registry") "absl_flags_reflection"))
+ (substitute* "webrtc/rtc_base/system/arch.h"
+ (("defined\\(__aarch64__\\)" all)
+ (string-append
+ ;; powerpc-linux
+ "(defined(__PPC__) && __SIZEOF_SIZE_T__ == 4)\n"
+ "#define WEBRTC_ARCH_32_BITS\n"
+ "#define WEBRTC_ARCH_BIG_ENDIAN\n"
+ ;; powerpc64-linux
+ "#elif (defined(__PPC64__) && defined(_BIG_ENDIAN))\n"
+ "#define WEBRTC_ARCH_64_BITS\n"
+ "#define WEBRTC_ARCH_BIG_ENDIAN\n"
+ ;; aarch64-linux
+ "#elif " all
+ ;; riscv64-linux
+ " || (defined(__riscv) && __riscv_xlen == 64)"
+ ;; powerpc64le-linux
+ " || (defined(__PPC64__) && defined(_LITTLE_ENDIAN))")))))
+ (patches
+ (search-patches "webrtc-audio-processing-big-endian.patch"))))
+ (build-system meson-build-system)
+ (inputs (list abseil-cpp))
(synopsis "WebRTC's Audio Processing Library")
(description "WebRTC-Audio-Processing library based on Google's
implementation of WebRTC.")
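The snippet above rewrites the defined(__aarch64__) test in webrtc/rtc_base/system/arch.h so that big-endian powerpc targets and riscv64 are recognized, replacing the phase-based patching the 0.3.1 recipe used for those systems. Assuming the upstream header tests that macro in an #elif branch which then defines WEBRTC_ARCH_64_BITS and WEBRTC_ARCH_LITTLE_ENDIAN (as in Google's arch.h), the substituted region would read roughly as the following sketch; it illustrates the result of the substitution, not the verbatim header:

    /* Illustrative sketch of arch.h after the snippet runs; the #define
       lines in the last branch are assumed to come from the unmodified
       upstream header. */
    #elif (defined(__PPC__) && __SIZEOF_SIZE_T__ == 4)      /* powerpc-linux */
    #define WEBRTC_ARCH_32_BITS
    #define WEBRTC_ARCH_BIG_ENDIAN
    #elif (defined(__PPC64__) && defined(_BIG_ENDIAN))      /* powerpc64-linux */
    #define WEBRTC_ARCH_64_BITS
    #define WEBRTC_ARCH_BIG_ENDIAN
    /* aarch64-linux, riscv64-linux and powerpc64le-linux share the original
       64-bit little-endian branch: */
    #elif defined(__aarch64__) || (defined(__riscv) && __riscv_xlen == 64) || (defined(__PPC64__) && defined(_LITTLE_ENDIAN))
    #define WEBRTC_ARCH_64_BITS
    #define WEBRTC_ARCH_LITTLE_ENDIAN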
diff --git a/gnu/packages/patches/webrtc-audio-processing-big-endian.patch b/gnu/packages/patches/webrtc-audio-processing-big-endian.patch
index 78333fe7b7..1690597025 100644
--- a/gnu/packages/patches/webrtc-audio-processing-big-endian.patch
+++ b/gnu/packages/patches/webrtc-audio-processing-big-endian.patch
@@ -1,93 +1,331 @@
-https://bugs.freedesktop.org/show_bug.cgi?id=95738
-https://bugs.freedesktop.org/attachment.cgi?id=124025
+https://gitlab.freedesktop.org/pulseaudio/pulseaudio/-/issues/127
+https://github.com/desktop-app/tg_owt/commit/65f002e
-diff -up webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc.than webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc
---- webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc.than 2016-05-24 08:28:45.749940095 -0400
-+++ webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc 2016-05-24 08:50:30.361020010 -0400
-@@ -64,9 +64,6 @@ WavReader::~WavReader() {
+From 65f002eeda1d97ddc70c8c49ec563987203c76f5 Mon Sep 17 00:00:00 2001
+From: Nicholas Guriev <nicholas@guriev.su>
+Date: Thu, 28 Jan 2021 20:54:06 +0300
+Subject: [PATCH] Provide endianness converters before writing or after reading
+ WAV
+
+---
+ src/common_audio/wav_file.cc | 80 ++++++++++++++++++++++++++-------
+ src/common_audio/wav_header.cc | 81 ++++++++++++++++++++--------------
+ 2 files changed, 111 insertions(+), 50 deletions(-)
+
+diff --git a/webrtc/common_audio/wav_file.cc b/webrtc/common_audio/wav_file.cc
+index e49126f1..b5292668 100644
+--- a/webrtc/common_audio/wav_file.cc
++++ b/webrtc/common_audio/wav_file.cc
+@@ -10,6 +10,7 @@
+
+ #include "common_audio/wav_file.h"
+
++#include <byteswap.h>
+ #include <errno.h>
+
+ #include <algorithm>
+@@ -34,6 +35,38 @@ bool FormatSupported(WavFormat format) {
+ format == WavFormat::kWavFormatIeeeFloat;
}
- size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
++template <typename T>
++void TranslateEndianness(T* destination, const T* source, size_t length) {
++ static_assert(sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
++ "no converter, use integral types");
++ if (sizeof(T) == 2) {
++ const uint16_t* src = reinterpret_cast<const uint16_t*>(source);
++ uint16_t* dst = reinterpret_cast<uint16_t*>(destination);
++ for (size_t index = 0; index < length; index++) {
++ dst[index] = bswap_16(src[index]);
++ }
++ }
++ if (sizeof(T) == 4) {
++ const uint32_t* src = reinterpret_cast<const uint32_t*>(source);
++ uint32_t* dst = reinterpret_cast<uint32_t*>(destination);
++ for (size_t index = 0; index < length; index++) {
++ dst[index] = bswap_32(src[index]);
++ }
++ }
++ if (sizeof(T) == 8) {
++ const uint64_t* src = reinterpret_cast<const uint64_t*>(source);
++ uint64_t* dst = reinterpret_cast<uint64_t*>(destination);
++ for (size_t index = 0; index < length; index++) {
++ dst[index] = bswap_64(src[index]);
++ }
++ }
++}
++
++template <typename T>
++void TranslateEndianness(T* buffer, size_t length) {
++ TranslateEndianness(buffer, buffer, length);
++}
++
+ // Doesn't take ownership of the file handle and won't close it.
+ class WavHeaderFileReader : public WavHeaderReader {
+ public:
+@@ -89,10 +122,6 @@ void WavReader::Reset() {
+
+ size_t WavReader::ReadSamples(const size_t num_samples,
+ int16_t* const samples) {
-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-#error "Need to convert samples to big-endian when reading from WAV file"
-#endif
- // There could be metadata after the audio; ensure we don't read it.
- num_samples = std::min(rtc::checked_cast<uint32_t>(num_samples),
- num_samples_remaining_);
-@@ -76,6 +73,12 @@ size_t WavReader::ReadSamples(size_t num
- RTC_CHECK(read == num_samples || feof(file_handle_));
- RTC_CHECK_LE(read, num_samples_remaining_);
- num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
-+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-+ //convert to big-endian
-+ for(size_t idx = 0; idx < num_samples; idx++) {
-+ samples[idx] = (samples[idx]<<8) | (samples[idx]>>8);
-+ }
+-
+ size_t num_samples_left_to_read = num_samples;
+ size_t next_chunk_start = 0;
+ while (num_samples_left_to_read > 0 && num_unread_samples_ > 0) {
+@@ -105,6 +134,9 @@ size_t WavReader::ReadSamples(const size_t num_samples,
+ num_bytes_read = file_.Read(samples_to_convert.data(),
+ chunk_size * sizeof(samples_to_convert[0]));
+ num_samples_read = num_bytes_read / sizeof(samples_to_convert[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(samples_to_convert.data(), num_samples_read);
++#endif
+
+ for (size_t j = 0; j < num_samples_read; ++j) {
+ samples[next_chunk_start + j] = FloatToS16(samples_to_convert[j]);
+@@ -114,6 +146,10 @@ size_t WavReader::ReadSamples(const size_t num_samples,
+ num_bytes_read = file_.Read(&samples[next_chunk_start],
+ chunk_size * sizeof(samples[0]));
+ num_samples_read = num_bytes_read / sizeof(samples[0]);
++
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(&samples[next_chunk_start], num_samples_read);
+#endif
- return read;
+ }
+ RTC_CHECK(num_samples_read == 0 || (num_bytes_read % num_samples_read) == 0)
+ << "Corrupt file: file ended in the middle of a sample.";
+@@ -129,10 +165,6 @@ size_t WavReader::ReadSamples(const size_t num_samples,
}
-@@ -120,10 +123,17 @@ WavWriter::~WavWriter() {
+ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to big-endian when reading from WAV file"
+-#endif
+-
+ size_t num_samples_left_to_read = num_samples;
+ size_t next_chunk_start = 0;
+ while (num_samples_left_to_read > 0 && num_unread_samples_ > 0) {
+@@ -145,6 +177,9 @@ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+ num_bytes_read = file_.Read(samples_to_convert.data(),
+ chunk_size * sizeof(samples_to_convert[0]));
+ num_samples_read = num_bytes_read / sizeof(samples_to_convert[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(samples_to_convert.data(), num_samples_read);
++#endif
+
+ for (size_t j = 0; j < num_samples_read; ++j) {
+ samples[next_chunk_start + j] =
+@@ -155,6 +190,9 @@ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+ num_bytes_read = file_.Read(&samples[next_chunk_start],
+ chunk_size * sizeof(samples[0]));
+ num_samples_read = num_bytes_read / sizeof(samples[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(&samples[next_chunk_start], num_samples_read);
++#endif
+
+ for (size_t j = 0; j < num_samples_read; ++j) {
+ samples[next_chunk_start + j] =
+@@ -213,24 +251,32 @@ WavWriter::WavWriter(FileWrapper file,
+ }
void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
- #ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-#error "Need to convert samples to little-endian when writing to WAV file"
-#endif
-+ int16_t * le_samples = new int16_t[num_samples];
-+ for(size_t idx = 0; idx < num_samples; idx++) {
-+ le_samples[idx] = (samples[idx]<<8) | (samples[idx]>>8);
-+ }
-+ const size_t written =
-+ fwrite(le_samples, sizeof(*le_samples), num_samples, file_handle_);
-+ delete []le_samples;
+-
+ for (size_t i = 0; i < num_samples; i += kMaxChunksize) {
+ const size_t num_remaining_samples = num_samples - i;
+ const size_t num_samples_to_write =
+ std::min(kMaxChunksize, num_remaining_samples);
+
+ if (format_ == WavFormat::kWavFormatPcm) {
++#ifndef WEBRTC_ARCH_BIG_ENDIAN
+ RTC_CHECK(
+ file_.Write(&samples[i], num_samples_to_write * sizeof(samples[0])));
+#else
- const size_t written =
- fwrite(samples, sizeof(*samples), num_samples, file_handle_);
++ std::array<int16_t, kMaxChunksize> converted_samples;
++ TranslateEndianness(converted_samples.data(), &samples[i],
++ num_samples_to_write);
++ RTC_CHECK(
++ file_.Write(converted_samples.data(),
++ num_samples_to_write * sizeof(converted_samples[0])));
+#endif
- RTC_CHECK_EQ(num_samples, written);
- num_samples_ += static_cast<uint32_t>(written);
- RTC_CHECK(written <= std::numeric_limits<uint32_t>::max() ||
-diff -up webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc.than webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc
---- webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc.than 2016-05-24 08:50:52.591379263 -0400
-+++ webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc 2016-05-24 08:52:08.552606848 -0400
-@@ -129,7 +129,39 @@ static inline std::string ReadFourCC(uin
- return std::string(reinterpret_cast<char*>(&x), 4);
+ } else {
+ RTC_CHECK_EQ(format_, WavFormat::kWavFormatIeeeFloat);
+ std::array<float, kMaxChunksize> converted_samples;
+ for (size_t j = 0; j < num_samples_to_write; ++j) {
+ converted_samples[j] = S16ToFloat(samples[i + j]);
+ }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+ RTC_CHECK(
+ file_.Write(converted_samples.data(),
+ num_samples_to_write * sizeof(converted_samples[0])));
+@@ -243,10 +289,6 @@ void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
}
- #else
--#error "Write be-to-le conversion functions"
-+static inline void WriteLE16(uint16_t* f, uint16_t x) {
-+ *f = ((x << 8) & 0xff00) | ( ( x >> 8) & 0x00ff);
-+}
-+
-+static inline void WriteLE32(uint32_t* f, uint32_t x) {
-+ *f = ( (x & 0x000000ff) << 24 )
-+ | ((x & 0x0000ff00) << 8)
-+ | ((x & 0x00ff0000) >> 8)
-+ | ((x & 0xff000000) >> 24 );
-+}
-+
-+static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) {
-+ *f = (static_cast<uint32_t>(a) << 24 )
-+ | (static_cast<uint32_t>(b) << 16)
-+ | (static_cast<uint32_t>(c) << 8)
-+ | (static_cast<uint32_t>(d) );
-+}
+
+ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to little-endian when writing to WAV file"
+-#endif
+-
+ for (size_t i = 0; i < num_samples; i += kMaxChunksize) {
+ const size_t num_remaining_samples = num_samples - i;
+ const size_t num_samples_to_write =
+@@ -257,6 +299,9 @@ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+ for (size_t j = 0; j < num_samples_to_write; ++j) {
+ converted_samples[j] = FloatS16ToS16(samples[i + j]);
+ }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+ RTC_CHECK(
+ file_.Write(converted_samples.data(),
+ num_samples_to_write * sizeof(converted_samples[0])));
+@@ -266,6 +311,9 @@ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+ for (size_t j = 0; j < num_samples_to_write; ++j) {
+ converted_samples[j] = FloatS16ToFloat(samples[i + j]);
+ }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++ TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+ RTC_CHECK(
+ file_.Write(converted_samples.data(),
+ num_samples_to_write * sizeof(converted_samples[0])));
+diff --git a/webrtc/common_audio/wav_header.cc b/webrtc/common_audio/wav_header.cc
+index 1ccbffca..98264a5c 100644
+--- a/webrtc/common_audio/wav_header.cc
++++ b/webrtc/common_audio/wav_header.cc
+@@ -14,6 +14,8 @@
+
+ #include "common_audio/wav_header.h"
+
++#include <endian.h>
+
-+static inline uint16_t ReadLE16(uint16_t x) {
-+ return (( x & 0x00ff) << 8 )| ((x & 0xff00)>>8);
-+}
+ #include <cstring>
+ #include <limits>
+ #include <string>
+@@ -26,10 +28,6 @@
+ namespace webrtc {
+ namespace {
+
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Code not working properly for big endian platforms."
+-#endif
+-
+ #pragma pack(2)
+ struct ChunkHeader {
+ uint32_t ID;
+@@ -174,6 +172,8 @@ bool FindWaveChunk(ChunkHeader* chunk_header,
+ if (readable->Read(chunk_header, sizeof(*chunk_header)) !=
+ sizeof(*chunk_header))
+ return false; // EOF.
++ chunk_header->Size = le32toh(chunk_header->Size);
+
-+static inline uint32_t ReadLE32(uint32_t x) {
-+ return ( (x & 0x000000ff) << 24 )
-+ | ( (x & 0x0000ff00) << 8 )
-+ | ( (x & 0x00ff0000) >> 8)
-+ | ( (x & 0xff000000) >> 24 );
-+}
+ if (ReadFourCC(chunk_header->ID) == sought_chunk_id)
+ return true; // Sought chunk found.
+ // Ignore current chunk by skipping its payload.
+@@ -187,6 +187,13 @@ bool ReadFmtChunkData(FmtPcmSubchunk* fmt_subchunk, WavHeaderReader* readable) {
+ if (readable->Read(&(fmt_subchunk->AudioFormat), kFmtPcmSubchunkSize) !=
+ kFmtPcmSubchunkSize)
+ return false;
++ fmt_subchunk->AudioFormat = le16toh(fmt_subchunk->AudioFormat);
++ fmt_subchunk->NumChannels = le16toh(fmt_subchunk->NumChannels);
++ fmt_subchunk->SampleRate = le32toh(fmt_subchunk->SampleRate);
++ fmt_subchunk->ByteRate = le32toh(fmt_subchunk->ByteRate);
++ fmt_subchunk->BlockAlign = le16toh(fmt_subchunk->BlockAlign);
++ fmt_subchunk->BitsPerSample = le16toh(fmt_subchunk->BitsPerSample);
+
-+static inline std::string ReadFourCC(uint32_t x) {
-+ x = ReadLE32(x);
-+ return std::string(reinterpret_cast<char*>(&x), 4);
-+}
- #endif
+ const uint32_t fmt_size = fmt_subchunk->header.Size;
+ if (fmt_size != kFmtPcmSubchunkSize) {
+ // There is an optional two-byte extension field permitted to be present
+@@ -214,19 +221,22 @@ void WritePcmWavHeader(size_t num_channels,
+ auto header = rtc::MsanUninitialized<WavHeaderPcm>({});
+ const size_t bytes_in_payload = bytes_per_sample * num_samples;
+
+- header.riff.header.ID = PackFourCC('R', 'I', 'F', 'F');
+- header.riff.header.Size = RiffChunkSize(bytes_in_payload, *header_size);
+- header.riff.Format = PackFourCC('W', 'A', 'V', 'E');
+- header.fmt.header.ID = PackFourCC('f', 'm', 't', ' ');
+- header.fmt.header.Size = kFmtPcmSubchunkSize;
+- header.fmt.AudioFormat = MapWavFormatToHeaderField(WavFormat::kWavFormatPcm);
+- header.fmt.NumChannels = static_cast<uint16_t>(num_channels);
+- header.fmt.SampleRate = sample_rate;
+- header.fmt.ByteRate = ByteRate(num_channels, sample_rate, bytes_per_sample);
+- header.fmt.BlockAlign = BlockAlign(num_channels, bytes_per_sample);
+- header.fmt.BitsPerSample = static_cast<uint16_t>(8 * bytes_per_sample);
+- header.data.header.ID = PackFourCC('d', 'a', 't', 'a');
+- header.data.header.Size = static_cast<uint32_t>(bytes_in_payload);
++ header.riff.header.ID = htole32(PackFourCC('R', 'I', 'F', 'F'));
++ header.riff.header.Size =
++ htole32(RiffChunkSize(bytes_in_payload, *header_size));
++ header.riff.Format = htole32(PackFourCC('W', 'A', 'V', 'E'));
++ header.fmt.header.ID = htole32(PackFourCC('f', 'm', 't', ' '));
++ header.fmt.header.Size = htole32(kFmtPcmSubchunkSize);
++ header.fmt.AudioFormat =
++ htole16(MapWavFormatToHeaderField(WavFormat::kWavFormatPcm));
++ header.fmt.NumChannels = htole16(num_channels);
++ header.fmt.SampleRate = htole32(sample_rate);
++ header.fmt.ByteRate =
++ htole32(ByteRate(num_channels, sample_rate, bytes_per_sample));
++ header.fmt.BlockAlign = htole16(BlockAlign(num_channels, bytes_per_sample));
++ header.fmt.BitsPerSample = htole16(8 * bytes_per_sample);
++ header.data.header.ID = htole32(PackFourCC('d', 'a', 't', 'a'));
++ header.data.header.Size = htole32(bytes_in_payload);
+
+ // Do an extra copy rather than writing everything to buf directly, since buf
+ // might not be correctly aligned.
+@@ -245,24 +255,26 @@ void WriteIeeeFloatWavHeader(size_t num_channels,
+ auto header = rtc::MsanUninitialized<WavHeaderIeeeFloat>({});
+ const size_t bytes_in_payload = bytes_per_sample * num_samples;
+
+- header.riff.header.ID = PackFourCC('R', 'I', 'F', 'F');
+- header.riff.header.Size = RiffChunkSize(bytes_in_payload, *header_size);
+- header.riff.Format = PackFourCC('W', 'A', 'V', 'E');
+- header.fmt.header.ID = PackFourCC('f', 'm', 't', ' ');
+- header.fmt.header.Size = kFmtIeeeFloatSubchunkSize;
++ header.riff.header.ID = htole32(PackFourCC('R', 'I', 'F', 'F'));
++ header.riff.header.Size =
++ htole32(RiffChunkSize(bytes_in_payload, *header_size));
++ header.riff.Format = htole32(PackFourCC('W', 'A', 'V', 'E'));
++ header.fmt.header.ID = htole32(PackFourCC('f', 'm', 't', ' '));
++ header.fmt.header.Size = htole32(kFmtIeeeFloatSubchunkSize);
+ header.fmt.AudioFormat =
+- MapWavFormatToHeaderField(WavFormat::kWavFormatIeeeFloat);
+- header.fmt.NumChannels = static_cast<uint16_t>(num_channels);
+- header.fmt.SampleRate = sample_rate;
+- header.fmt.ByteRate = ByteRate(num_channels, sample_rate, bytes_per_sample);
+- header.fmt.BlockAlign = BlockAlign(num_channels, bytes_per_sample);
+- header.fmt.BitsPerSample = static_cast<uint16_t>(8 * bytes_per_sample);
+- header.fmt.ExtensionSize = 0;
+- header.fact.header.ID = PackFourCC('f', 'a', 'c', 't');
+- header.fact.header.Size = 4;
+- header.fact.SampleLength = static_cast<uint32_t>(num_channels * num_samples);
+- header.data.header.ID = PackFourCC('d', 'a', 't', 'a');
+- header.data.header.Size = static_cast<uint32_t>(bytes_in_payload);
++ htole16(MapWavFormatToHeaderField(WavFormat::kWavFormatIeeeFloat));
++ header.fmt.NumChannels = htole16(num_channels);
++ header.fmt.SampleRate = htole32(sample_rate);
++ header.fmt.ByteRate =
++ htole32(ByteRate(num_channels, sample_rate, bytes_per_sample));
++ header.fmt.BlockAlign = htole16(BlockAlign(num_channels, bytes_per_sample));
++ header.fmt.BitsPerSample = htole16(8 * bytes_per_sample);
++ header.fmt.ExtensionSize = htole16(0);
++ header.fact.header.ID = htole32(PackFourCC('f', 'a', 'c', 't'));
++ header.fact.header.Size = htole32(4);
++ header.fact.SampleLength = htole32(num_channels * num_samples);
++ header.data.header.ID = htole32(PackFourCC('d', 'a', 't', 'a'));
++ header.data.header.Size = htole32(bytes_in_payload);
+
+ // Do an extra copy rather than writing everything to buf directly, since buf
+ // might not be correctly aligned.
+@@ -391,6 +403,7 @@ bool ReadWavHeader(WavHeaderReader* readable,
+ return false;
+ if (ReadFourCC(header.riff.Format) != "WAVE")
+ return false;
++ header.riff.header.Size = le32toh(header.riff.header.Size);
- static inline uint32_t RiffChunkSize(uint32_t bytes_in_payload) {
+ // Find "fmt " and "data" chunks. While the official Wave file specification
+ // does not put requirements on the chunks order, it is uncommon to find the