// Copyright 2020 Google LLC. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#include <packager/file/http_file.h>

#include <memory>
#include <vector>

#include <absl/strings/str_split.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>

#include <packager/file/file.h>
#include <packager/file/file_closer.h>
#include <packager/media/test/test_web_server.h>

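// Asserts that the (possibly nested, dot-separated) |key| in |json| holds a
// string equal to |value|, and prints the whole JSON document on failure,
// e.g. ASSERT_JSON_STRING(json, "headers.Host", "foo").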
#define ASSERT_JSON_STRING(json, key, value) \
  ASSERT_EQ(GetJsonString((json), (key)), (value)) << "JSON is " << (json)

namespace shaka {

namespace {

const std::vector<std::string> kNoHeaders;
const std::string kNoContentType;
const std::string kBinaryContentType = "application/octet-stream";
const int kDefaultTestTimeout = 10;  // For a local, embedded server

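// FileCloser closes the file when a FilePtr goes out of scope, so a failed
// test does not leave the request open. Tests that want to verify the result
// of closing call file.release()->Close() or CloseWithStatus() explicitly.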
using FilePtr = std::unique_ptr<HttpFile, FileCloser>;

// Handles keys with dots, indicating a nested field.
std::string GetJsonString(const nlohmann::json& json,
                          const std::string& combined_key) {
  std::vector<std::string> keys = absl::StrSplit(combined_key, '.');
  nlohmann::json current = json;

  for (const std::string& key : keys) {
    if (!current.contains(key)) {
      return "";
    }
    current = current[key];
  }

  if (current.is_string()) {
    return current.get<std::string>();
  }

  return "";
}

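// Reads the entire response body from |file| and parses it as JSON with
// exceptions disabled. A read error or an unparsable body yields a value that
// is not a JSON object, which tests detect via is_object().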
nlohmann::json HandleResponse(const FilePtr& file) {
  std::string result;
  while (true) {
    char buffer[64 * 1024];
    auto ret = file->Read(buffer, sizeof(buffer));
    if (ret < 0)
      return nullptr;
    if (ret == 0)
      break;
    result.append(buffer, buffer + ret);
  }
  VLOG(1) << "Response:\n" << result;

  nlohmann::json value = nlohmann::json::parse(result,
                                               /* parser callback */ nullptr,
                                               /* allow exceptions */ false);
  return value;
}

// Quoting gtest docs:
//   "For each TEST_F, GoogleTest will create a fresh test fixture object,
//   immediately call SetUp(), run the test body, call TearDown(), and then
//   delete the test fixture object."
// So we don't need a TearDown method. The destructor on TestWebServer is good
// enough.
class HttpFileTest : public testing::Test {
 protected:
  void SetUp() override { ASSERT_TRUE(server_.Start()); }

  media::TestWebServer server_;
};

}  // namespace

TEST_F(HttpFileTest, BasicGet) {
  FilePtr file(new HttpFile(HttpMethod::kGet, server_.ReflectUrl(),
                            kNoContentType, kNoHeaders, kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());
  ASSERT_JSON_STRING(json, "method", "GET");
}

TEST_F(HttpFileTest, CustomHeaders) {
  std::vector<std::string> headers{"Host: foo", "X-My-Header: Something"};
  FilePtr file(new HttpFile(HttpMethod::kGet, server_.ReflectUrl(),
                            kNoContentType, headers, kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());

  ASSERT_JSON_STRING(json, "method", "GET");
  ASSERT_JSON_STRING(json, "headers.Host", "foo");
  ASSERT_JSON_STRING(json, "headers.X-My-Header", "Something");
}

TEST_F(HttpFileTest, BasicPost) {
  FilePtr file(new HttpFile(HttpMethod::kPost, server_.ReflectUrl(),
                            kBinaryContentType, kNoHeaders,
                            kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  const std::string data = "abcd";

  ASSERT_EQ(file->Write(data.data(), data.size()),
            static_cast<int64_t>(data.size()));
  // Signal that there will be no more writes.
  // If we don't do this, the request can hang in libcurl.
  file->CloseForWriting();

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());

  ASSERT_JSON_STRING(json, "method", "POST");
  ASSERT_JSON_STRING(json, "body", data);
  ASSERT_JSON_STRING(json, "headers.Content-Type", kBinaryContentType);

  // Curl may choose to send chunked or not based on the data. We request
  // chunked encoding, but don't control if it is actually used. If we get
  // chunked transfer, there is no Content-Length header reflected back to us.
  if (!GetJsonString(json, "headers.Content-Length").empty()) {
    ASSERT_JSON_STRING(json, "headers.Content-Length",
                       std::to_string(data.size()));
  } else {
    ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked");
  }
}

TEST_F(HttpFileTest, BasicPut) {
  FilePtr file(new HttpFile(HttpMethod::kPut, server_.ReflectUrl(),
                            kBinaryContentType, kNoHeaders,
                            kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  const std::string data = "abcd";

  ASSERT_EQ(file->Write(data.data(), data.size()),
            static_cast<int64_t>(data.size()));
  // Signal that there will be no more writes.
  // If we don't do this, the request can hang in libcurl.
  file->CloseForWriting();

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());

  ASSERT_JSON_STRING(json, "method", "PUT");
  ASSERT_JSON_STRING(json, "body", data);
  ASSERT_JSON_STRING(json, "headers.Content-Type", kBinaryContentType);

  // Curl may choose to send chunked or not based on the data. We request
  // chunked encoding, but don't control if it is actually used. If we get
  // chunked transfer, there is no Content-Length header reflected back to us.
  if (!GetJsonString(json, "headers.Content-Length").empty()) {
    ASSERT_JSON_STRING(json, "headers.Content-Length",
                       std::to_string(data.size()));
  } else {
    ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked");
  }
}

TEST_F(HttpFileTest, MultipleWrites) {
  FilePtr file(new HttpFile(HttpMethod::kPut, server_.ReflectUrl(),
                            kBinaryContentType, kNoHeaders,
                            kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  const std::string data1 = "abcd";
  const std::string data2 = "efgh";
  const std::string data3 = "ijkl";
  const std::string data4 = "mnop";

  ASSERT_EQ(file->Write(data1.data(), data1.size()),
            static_cast<int64_t>(data1.size()));
  ASSERT_EQ(file->Write(data2.data(), data2.size()),
            static_cast<int64_t>(data2.size()));
  ASSERT_EQ(file->Write(data3.data(), data3.size()),
            static_cast<int64_t>(data3.size()));
  ASSERT_EQ(file->Write(data4.data(), data4.size()),
            static_cast<int64_t>(data4.size()));
  // Signal that there will be no more writes.
  // If we don't do this, the request can hang in libcurl.
  file->CloseForWriting();

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());

  ASSERT_JSON_STRING(json, "method", "PUT");
  ASSERT_JSON_STRING(json, "body", data1 + data2 + data3 + data4);
  ASSERT_JSON_STRING(json, "headers.Content-Type", kBinaryContentType);

  // Curl may choose to send chunked or not based on the data. We request
  // chunked encoding, but don't control if it is actually used. If we get
  // chunked transfer, there is no Content-Length header reflected back to us.
  if (!GetJsonString(json, "headers.Content-Length").empty()) {
    auto totalSize = data1.size() + data2.size() + data3.size() + data4.size();
    ASSERT_JSON_STRING(json, "headers.Content-Length",
                       std::to_string(totalSize));
  } else {
    ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked");
  }
}

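// Unlike MultipleWrites, this test calls Flush() after every write, so each
// piece of data is sent as its own chunk and the reflected request is
// expected to always use chunked Transfer-Encoding.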
TEST_F(HttpFileTest, MultipleChunks) {
  FilePtr file(new HttpFile(HttpMethod::kPut, server_.ReflectUrl(),
                            kBinaryContentType, kNoHeaders,
                            kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  // Each of these is written as an explicit chunk to the server.
  const std::string data1 = "abcd";
  const std::string data2 = "efgh";
  const std::string data3 = "ijkl";
  const std::string data4 = "mnop";

  ASSERT_EQ(file->Write(data1.data(), data1.size()),
            static_cast<int64_t>(data1.size()));
  // Flush the first chunk.
  ASSERT_TRUE(file->Flush());

  ASSERT_EQ(file->Write(data2.data(), data2.size()),
            static_cast<int64_t>(data2.size()));
  // Flush the second chunk.
  ASSERT_TRUE(file->Flush());

  ASSERT_EQ(file->Write(data3.data(), data3.size()),
            static_cast<int64_t>(data3.size()));
  // Flush the third chunk.
  ASSERT_TRUE(file->Flush());

  ASSERT_EQ(file->Write(data4.data(), data4.size()),
            static_cast<int64_t>(data4.size()));
  // Flush the fourth chunk.
  ASSERT_TRUE(file->Flush());

  // Signal that there will be no more writes.
  // If we don't do this, the request can hang in libcurl.
  file->CloseForWriting();

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());

  ASSERT_JSON_STRING(json, "method", "PUT");
  ASSERT_JSON_STRING(json, "body", data1 + data2 + data3 + data4);
  ASSERT_JSON_STRING(json, "headers.Content-Type", kBinaryContentType);
  ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked");
}

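// A 404 is not reported by Read() (the body is simply empty); it surfaces as
// error::HTTP_FAILURE from CloseWithStatus().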
TEST_F(HttpFileTest, Error404) {
  FilePtr file(new HttpFile(HttpMethod::kGet, server_.StatusCodeUrl(404),
                            kNoContentType, kNoHeaders, kDefaultTestTimeout));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  // The site returns an empty response.
  uint8_t buffer[1];
  ASSERT_EQ(file->Read(buffer, sizeof(buffer)), 0);

  auto status = file.release()->CloseWithStatus();
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(status.error_code(), error::HTTP_FAILURE);
}

TEST_F(HttpFileTest, TimeoutTriggered) {
  FilePtr file(new HttpFile(HttpMethod::kGet, server_.DelayUrl(8),
                            kNoContentType, kNoHeaders,
                            1 /* timeout in seconds */));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  // Request should timeout; error is reported in Close/CloseWithStatus.
  uint8_t buffer[1];
  ASSERT_EQ(file->Read(buffer, sizeof(buffer)), 0);

  auto status = file.release()->CloseWithStatus();
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(status.error_code(), error::TIME_OUT);
}

TEST_F(HttpFileTest, TimeoutNotTriggered) {
  FilePtr file(new HttpFile(HttpMethod::kGet, server_.DelayUrl(1),
                            kNoContentType, kNoHeaders,
                            5 /* timeout in seconds */));
  ASSERT_TRUE(file);
  ASSERT_TRUE(file->Open());

  auto json = HandleResponse(file);
  ASSERT_TRUE(json.is_object());
  ASSERT_TRUE(file.release()->Close());
}

}  // namespace shaka