Update On Fri Dec 6 19:37:13 CET 2024

github-action[bot]
2024-12-06 19:37:13 +01:00
parent e7ed3f08e7
commit 6f64ef7b44
72 changed files with 2200 additions and 613 deletions


@@ -4144,6 +4144,7 @@ set(files
src/net/base64.cpp
src/net/cipher.cpp
src/net/iobuf.cpp
src/net/io_buffer.cpp
src/net/hkdf_sha1.cpp
src/net/hmac_sha1.cpp
src/net/dns_addrinfo_helper.cpp
@@ -4210,6 +4211,7 @@ set(hfiles
src/net/base64.hpp
src/net/cipher.hpp
src/net/iobuf.hpp
src/net/io_buffer.hpp
src/net/hkdf_sha1.hpp
src/net/hmac_sha1.hpp
src/net/dns_addrinfo_helper.hpp

yass/src/net/io_buffer.cpp (new file, 147 lines)

@@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Chilledheart */
#include "net/io_buffer.hpp"
#include <utility>
#include "base/check_op.h"
#include "base/numerics/safe_math.h"
namespace net {
// TODO(eroman): IOBuffer is being converted to require buffer sizes and offsets
// be specified as "size_t" rather than "int" (crbug.com/488553). To facilitate
// this move (since LOTS of code needs to be updated), this function ensures
// that sizes can be safely converted to an "int" without truncation. The
// assert ensures calling this with an "int" argument is also safe.
void IOBuffer::AssertValidBufferSize(size_t size) {
static_assert(sizeof(size_t) >= sizeof(int));
gurl_base::CheckedNumeric<int>(size).ValueOrDie();
}
IOBuffer::IOBuffer() = default;
IOBuffer::IOBuffer(char* data, size_t size) : data_(data), size_(size) {
AssertValidBufferSize(size);
}
IOBuffer::~IOBuffer() = default;
IOBufferWithSize::IOBufferWithSize() = default;
IOBufferWithSize::IOBufferWithSize(size_t buffer_size) {
AssertValidBufferSize(buffer_size);
if (buffer_size) {
size_ = buffer_size;
data_ = new char[buffer_size];
}
}
IOBufferWithSize::~IOBufferWithSize() {
#if 0
data_.ClearAndDeleteArray();
#else
delete[] data_;
#endif
}
StringIOBuffer::StringIOBuffer(std::string s) : string_data_(std::move(s)) {
// Can't pass `s.data()` directly to IOBuffer constructor since moving
// from `s` may invalidate it. This is especially true for libc++ short
// string optimization where the data may be held in the string variable
// itself, instead of in a movable backing store.
AssertValidBufferSize(string_data_.size());
data_ = string_data_.data();
size_ = string_data_.size();
}
StringIOBuffer::~StringIOBuffer() {
// Clear pointer before this destructor makes it dangle.
data_ = nullptr;
}
DrainableIOBuffer::DrainableIOBuffer(scoped_refptr<IOBuffer> base, size_t size)
: IOBuffer(base->data(), size), base_(std::move(base)) {}
void DrainableIOBuffer::DidConsume(int bytes) {
SetOffset(used_ + bytes);
}
int DrainableIOBuffer::BytesRemaining() const {
return size_ - used_;
}
// Returns the number of consumed bytes.
int DrainableIOBuffer::BytesConsumed() const {
return used_;
}
void DrainableIOBuffer::SetOffset(int bytes) {
CHECK_GE(bytes, 0);
CHECK_LE(bytes, size_);
used_ = bytes;
data_ = base_->data() + used_;
}
DrainableIOBuffer::~DrainableIOBuffer() {
// Clear ptr before this destructor destroys the |base_| instance,
// making it dangle.
data_ = nullptr;
}
GrowableIOBuffer::GrowableIOBuffer() = default;
void GrowableIOBuffer::SetCapacity(int capacity) {
CHECK_GE(capacity, 0);
// this will get reset in `set_offset`.
data_ = nullptr;
size_ = 0;
// realloc will crash if it fails.
real_data_.reset(static_cast<char*>(realloc(real_data_.release(), capacity)));
capacity_ = capacity;
if (offset_ > capacity)
set_offset(capacity);
else
set_offset(offset_); // The pointer may have changed.
}
void GrowableIOBuffer::set_offset(int offset) {
CHECK_GE(offset, 0);
CHECK_LE(offset, capacity_);
offset_ = offset;
data_ = real_data_.get() + offset;
size_ = capacity_ - offset;
}
int GrowableIOBuffer::RemainingCapacity() {
return capacity_ - offset_;
}
char* GrowableIOBuffer::StartOfBuffer() {
return real_data_.get();
}
GrowableIOBuffer::~GrowableIOBuffer() {
data_ = nullptr;
}
PickledIOBuffer::PickledIOBuffer() = default;
void PickledIOBuffer::Done() {
data_ = const_cast<char*>(pickle_.data_as_char());
size_ = pickle_.size();
}
PickledIOBuffer::~PickledIOBuffer() {
// Avoid dangling ptr when this destructor destroys the pickle.
data_ = nullptr;
}
WrappedIOBuffer::WrappedIOBuffer(const char* data, size_t size) : IOBuffer(const_cast<char*>(data), size) {}
WrappedIOBuffer::~WrappedIOBuffer() = default;
} // namespace net
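Illustrative sketch (not part of the committed diff): how SetCapacity() and set_offset() above keep data() and size() tracking the unread tail of a GrowableIOBuffer. It assumes only the classes declared in io_buffer.hpp below and the gurl_base::MakeRefCounted helper that the header's own comments already use.

#include "net/io_buffer.hpp"

void GrowableIOBufferSketch() {
  auto buf = gurl_base::MakeRefCounted<net::GrowableIOBuffer>();
  buf->SetCapacity(64);                        // data() == StartOfBuffer(), size() == 64
  buf->set_offset(16);                         // data() advances by 16, size() shrinks to 48
  int free_space = buf->RemainingCapacity();   // capacity() - offset() == 48
  buf->SetCapacity(32);                        // offset (16) still fits; data() is re-derived
  (void)free_space;
}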

yass/src/net/io_buffer.hpp (new file, 239 lines)

@@ -0,0 +1,239 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Chilledheart */
#ifndef NET_BASE_IO_BUFFER_H_
#define NET_BASE_IO_BUFFER_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include "base/memory/free_deleter.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/pickle.h"
namespace net {
// IOBuffers are reference counted data buffers used for easier asynchronous
// IO handling.
//
// They are often used as the destination buffers for Read() operations, or as
// the source buffers for Write() operations.
//
// IMPORTANT: Never re-use an IOBuffer after cancelling the IO operation that
// was using it, since this may lead to memory corruption!
//
// -----------------------
// Ownership of IOBuffers:
// -----------------------
//
// Although IOBuffers are RefCountedThreadSafe, they are not intended to be
// used as a shared buffer, nor should they be used simultaneously across
// threads. The fact that they are reference counted is an implementation
// detail for allowing them to outlive cancellation of asynchronous
// operations.
//
// Instead, think of the underlying |char*| buffer contained by the IOBuffer
// as having exactly one owner at a time.
//
// Whenever you call an asynchronous operation that takes an IOBuffer,
// ownership is implicitly transferred to the called function, until the
// operation has completed (at which point it transfers back to the caller).
//
// ==> The IOBuffer's data should NOT be manipulated, destroyed, or read
// until the operation has completed.
//
// ==> Cancellation does NOT count as completion. If an operation using
// an IOBuffer is cancelled, the caller should release their
// reference to this IOBuffer at the time of cancellation since
// they can no longer use it.
//
// For instance, if you were to call a Read() operation on some class which
// takes an IOBuffer, and then delete that class (which generally will
// trigger cancellation), the IOBuffer which had been passed to Read() should
// never be re-used.
//
// This usage contract is assumed by any API which takes an IOBuffer, even
// though it may not be explicitly mentioned in the function's comments.
//
// -----------------------
// Motivation
// -----------------------
//
// The motivation for transferring ownership during cancellation is
// to make it easier to work with un-cancellable operations.
//
// For instance, let's say under the hood your API called out to the
// operating system's synchronous ReadFile() function on a worker thread.
// When cancelling through our asynchronous interface, we have no way of
// actually aborting the in progress ReadFile(). We must let it keep running,
// and hence the buffer it was reading into must remain alive. Using
// reference counting we can add a reference to the IOBuffer and make sure
// it is not destroyed until after the synchronous operation has completed.
// Base class, never instantiated, does not own the buffer.
class IOBuffer : public gurl_base::RefCountedThreadSafe<IOBuffer> {
public:
int size() const { return size_; }
char* data() { return data_; }
const char* data() const { return data_; }
uint8_t* bytes() { return reinterpret_cast<uint8_t*>(data()); }
const uint8_t* bytes() const { return reinterpret_cast<const uint8_t*>(data()); }
protected:
friend class gurl_base::RefCountedThreadSafe<IOBuffer>;
static void AssertValidBufferSize(size_t size);
IOBuffer();
IOBuffer(char* data, size_t size);
virtual ~IOBuffer();
raw_ptr<char> data_ = nullptr;
int size_ = 0;
};
// Class which owns its buffer and manages its destruction.
class IOBufferWithSize : public IOBuffer {
public:
IOBufferWithSize();
explicit IOBufferWithSize(size_t size);
protected:
~IOBufferWithSize() override;
};
// This is a read only IOBuffer. The data is stored in a string and
// the IOBuffer interface does not provide a proper way to modify it.
class StringIOBuffer : public IOBuffer {
public:
explicit StringIOBuffer(std::string s);
private:
~StringIOBuffer() override;
std::string string_data_;
};
// This version wraps an existing IOBuffer and provides convenient functions
// to progressively read all the data.
//
// DrainableIOBuffer is useful when you have an IOBuffer that contains data
// to be written progressively, and the Write() function takes an IOBuffer rather
// than a char*. DrainableIOBuffer can be used as follows:
//
// // payload is the IOBuffer containing the data to be written.
// buf = gurl_base::MakeRefCounted<DrainableIOBuffer>(payload, payload_size);
//
// while (buf->BytesRemaining() > 0) {
// // Write() takes an IOBuffer. If it takes char*, we could
// // simply use the regular IOBuffer like payload->data() + offset.
// int bytes_written = Write(buf, buf->BytesRemaining());
// buf->DidConsume(bytes_written);
// }
//
class DrainableIOBuffer : public IOBuffer {
public:
DrainableIOBuffer(scoped_refptr<IOBuffer> base, size_t size);
// DidConsume() changes the |data_| pointer so that |data_| always points
// to the first unconsumed byte.
void DidConsume(int bytes);
// Returns the number of unconsumed bytes.
int BytesRemaining() const;
// Returns the number of consumed bytes.
int BytesConsumed() const;
// Seeks to an arbitrary point in the buffer. The notions of bytes consumed
// and remaining are updated appropriately.
void SetOffset(int bytes);
private:
~DrainableIOBuffer() override;
scoped_refptr<IOBuffer> base_;
int used_ = 0;
};
// This version provides a resizable buffer and a changeable offset.
//
// GrowableIOBuffer is useful when you read data progressively without
// knowing the total size in advance. GrowableIOBuffer can be used as
// follows:
//
// buf = gurl_base::MakeRefCounted<GrowableIOBuffer>();
// buf->SetCapacity(1024); // Initial capacity.
//
// while (!some_stream->IsEOF()) {
// // Double the capacity if the remaining capacity is empty.
// if (buf->RemainingCapacity() == 0)
// buf->SetCapacity(buf->capacity() * 2);
// int bytes_read = some_stream->Read(buf, buf->RemainingCapacity());
// buf->set_offset(buf->offset() + bytes_read);
// }
//
class GrowableIOBuffer : public IOBuffer {
public:
GrowableIOBuffer();
// realloc memory to the specified capacity.
void SetCapacity(int capacity);
int capacity() { return capacity_; }
// |offset| moves the |data_| pointer, allowing "seeking" in the data.
void set_offset(int offset);
int offset() { return offset_; }
int RemainingCapacity();
char* StartOfBuffer();
private:
~GrowableIOBuffer() override;
std::unique_ptr<char, gurl_base::FreeDeleter> real_data_;
int capacity_ = 0;
int offset_ = 0;
};
// This version allows a pickle to be used as the storage for a write-style
// operation, avoiding an extra data copy.
class PickledIOBuffer : public IOBuffer {
public:
PickledIOBuffer();
gurl_base::Pickle* pickle() { return &pickle_; }
// Signals that we are done writing to the pickle and we can use it for a
// write-style IO operation.
void Done();
private:
~PickledIOBuffer() override;
gurl_base::Pickle pickle_;
};
// This class allows the creation of a temporary IOBuffer that doesn't really
// own the underlying buffer. Please use this class only as a last resort.
// A good example is the buffer for a synchronous operation, where we can be
// sure that nobody is keeping an extra reference to this object so the lifetime
// of the buffer can be completely managed by its intended owner.
class WrappedIOBuffer : public IOBuffer {
public:
WrappedIOBuffer(const char* data, size_t size);
protected:
~WrappedIOBuffer() override;
};
} // namespace net
#endif // NET_BASE_IO_BUFFER_H_
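A compilable form of the DrainableIOBuffer loop from the comment above (illustrative sketch, not part of the committed diff). WriteSome() is a hypothetical partial-write function standing in for whatever IOBuffer-taking Write() API the caller has; everything else is the API declared in this header.

#include <utility>
#include "net/io_buffer.hpp"

// Hypothetical: writes up to `len` bytes from `buf` and returns how many were written.
int WriteSome(net::IOBuffer* buf, int len);

void DrainSketch(scoped_refptr<net::IOBuffer> payload, size_t payload_size) {
  auto buf = gurl_base::MakeRefCounted<net::DrainableIOBuffer>(std::move(payload), payload_size);
  while (buf->BytesRemaining() > 0) {
    int written = WriteSome(buf.get(), buf->BytesRemaining());
    buf->DidConsume(written);  // data() now points at the first unwritten byte
  }
}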


@@ -166,6 +166,8 @@ add_library(url STATIC
googleurl/url/url_parse_internal.h
googleurl/url/url_util.cc
googleurl/url/url_util_internal.h
googleurl-override/base/pickle.h
googleurl-override/base/pickle.cc
# googleurl/url/scheme_host_port.cc
# googleurl/url/scheme_host_port.h
# googleurl/url/origin.cc


@@ -0,0 +1,25 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_FREE_DELETER_H_
#define BASE_MEMORY_FREE_DELETER_H_
#include <stdlib.h>
namespace gurl_base {
// Function object which invokes 'free' on its parameter, which must be
// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
//
// std::unique_ptr<int, gurl_base::FreeDeleter> foo_ptr(
// static_cast<int*>(malloc(sizeof(int))));
struct FreeDeleter {
inline void operator()(void* ptr) const {
free(ptr);
}
};
} // namespace gurl_base
#endif // BASE_MEMORY_FREE_DELETER_H_


@@ -0,0 +1,459 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/pickle.h"
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <type_traits>
#include "base/bits.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
namespace gurl_base {
// static
const size_t Pickle::kPayloadUnit = 64;
static const size_t kCapacityReadOnly = static_cast<size_t>(-1);
PickleIterator::PickleIterator(const Pickle& pickle)
: payload_(pickle.payload()),
read_index_(0),
end_index_(pickle.payload_size()) {}
template <typename Type>
inline bool PickleIterator::ReadBuiltinType(Type* result) {
static_assert(
std::is_integral_v<Type> && !std::is_same_v<Type, bool>,
"This method is only safe with to use with types without padding bits.");
const char* read_from = GetReadPointerAndAdvance<Type>();
if (!read_from)
return false;
memcpy(result, read_from, sizeof(*result));
return true;
}
inline void PickleIterator::Advance(size_t size) {
size_t aligned_size = bits::AlignUp(size, sizeof(uint32_t));
if (end_index_ - read_index_ < aligned_size) {
read_index_ = end_index_;
} else {
read_index_ += aligned_size;
}
}
template <typename Type>
inline const char* PickleIterator::GetReadPointerAndAdvance() {
if (sizeof(Type) > end_index_ - read_index_) {
read_index_ = end_index_;
return nullptr;
}
const char* current_read_ptr = payload_ + read_index_;
Advance(sizeof(Type));
return current_read_ptr;
}
const char* PickleIterator::GetReadPointerAndAdvance(size_t num_bytes) {
if (num_bytes > end_index_ - read_index_) {
read_index_ = end_index_;
return nullptr;
}
const char* current_read_ptr = payload_ + read_index_;
Advance(num_bytes);
return current_read_ptr;
}
inline const char* PickleIterator::GetReadPointerAndAdvance(
size_t num_elements,
size_t size_element) {
// Check for size_t overflow.
size_t num_bytes;
if (!CheckMul(num_elements, size_element).AssignIfValid(&num_bytes))
return nullptr;
return GetReadPointerAndAdvance(num_bytes);
}
bool PickleIterator::ReadBool(bool* result) {
// Not all bit patterns are valid bools. Avoid undefined behavior by reading a
// type with no padding bits, then converting to bool.
uint8_t v;
if (!ReadBuiltinType(&v)) {
return false;
}
*result = v != 0;
return true;
}
bool PickleIterator::ReadInt(int* result) {
return ReadBuiltinType(result);
}
bool PickleIterator::ReadLong(long* result) {
// Always read long as a 64-bit value to ensure compatibility between 32-bit
// and 64-bit processes.
int64_t result_int64 = 0;
if (!ReadBuiltinType(&result_int64))
return false;
if (!IsValueInRangeForNumericType<long>(result_int64))
return false;
*result = static_cast<long>(result_int64);
return true;
}
bool PickleIterator::ReadUInt16(uint16_t* result) {
return ReadBuiltinType(result);
}
bool PickleIterator::ReadUInt32(uint32_t* result) {
return ReadBuiltinType(result);
}
bool PickleIterator::ReadInt64(int64_t* result) {
return ReadBuiltinType(result);
}
bool PickleIterator::ReadUInt64(uint64_t* result) {
return ReadBuiltinType(result);
}
bool PickleIterator::ReadFloat(float* result) {
// crbug.com/315213
// The source data may not be properly aligned, and unaligned float reads
// cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
// into the result.
const char* read_from = GetReadPointerAndAdvance<float>();
if (!read_from)
return false;
memcpy(result, read_from, sizeof(*result));
return true;
}
bool PickleIterator::ReadDouble(double* result) {
// crbug.com/315213
// The source data may not be properly aligned, and unaligned double reads
// cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
// into the result.
const char* read_from = GetReadPointerAndAdvance<double>();
if (!read_from)
return false;
memcpy(result, read_from, sizeof(*result));
return true;
}
bool PickleIterator::ReadString(std::string* result) {
size_t len;
if (!ReadLength(&len))
return false;
const char* read_from = GetReadPointerAndAdvance(len);
if (!read_from)
return false;
result->assign(read_from, len);
return true;
}
bool PickleIterator::ReadStringPiece(StringPiece* result) {
size_t len;
if (!ReadLength(&len))
return false;
const char* read_from = GetReadPointerAndAdvance(len);
if (!read_from)
return false;
*result = StringPiece(read_from, len);
return true;
}
bool PickleIterator::ReadString16(std::u16string* result) {
size_t len;
if (!ReadLength(&len))
return false;
const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16_t));
if (!read_from)
return false;
result->assign(reinterpret_cast<const char16_t*>(read_from), len);
return true;
}
bool PickleIterator::ReadStringPiece16(StringPiece16* result) {
size_t len;
if (!ReadLength(&len))
return false;
const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16_t));
if (!read_from)
return false;
*result = StringPiece16(reinterpret_cast<const char16_t*>(read_from), len);
return true;
}
bool PickleIterator::ReadData(const char** data, size_t* length) {
*length = 0;
*data = nullptr;
if (!ReadLength(length))
return false;
return ReadBytes(data, *length);
}
std::optional<gurl_base::span<const uint8_t>> PickleIterator::ReadData() {
const char* ptr;
size_t length;
if (!ReadData(&ptr, &length))
return std::nullopt;
return gurl_base::as_bytes(gurl_base::make_span(ptr, length));
}
bool PickleIterator::ReadBytes(const char** data, size_t length) {
const char* read_from = GetReadPointerAndAdvance(length);
if (!read_from)
return false;
*data = read_from;
return true;
}
Pickle::Attachment::Attachment() = default;
Pickle::Attachment::~Attachment() = default;
// Payload is uint32_t aligned.
Pickle::Pickle()
: header_(nullptr),
header_size_(sizeof(Header)),
capacity_after_header_(0),
write_offset_(0) {
static_assert(gurl_base::bits::IsPowerOfTwo(Pickle::kPayloadUnit),
"Pickle::kPayloadUnit must be a power of two");
Resize(kPayloadUnit);
header_->payload_size = 0;
}
Pickle::Pickle(size_t header_size)
: header_(nullptr),
header_size_(bits::AlignUp(header_size, sizeof(uint32_t))),
capacity_after_header_(0),
write_offset_(0) {
DCHECK_GE(header_size, sizeof(Header));
DCHECK_LE(header_size, kPayloadUnit);
Resize(kPayloadUnit);
header_->payload_size = 0;
}
Pickle::Pickle(span<const uint8_t> data)
: Pickle(reinterpret_cast<const char*>(data.data()), data.size()) {}
Pickle::Pickle(const char* data, size_t data_len)
: header_(reinterpret_cast<Header*>(const_cast<char*>(data))),
header_size_(0),
capacity_after_header_(kCapacityReadOnly),
write_offset_(0) {
if (data_len >= sizeof(Header))
header_size_ = data_len - header_->payload_size;
if (header_size_ > data_len)
header_size_ = 0;
if (header_size_ != bits::AlignUp(header_size_, sizeof(uint32_t)))
header_size_ = 0;
// If there is anything wrong with the data, we're not going to use it.
if (!header_size_)
header_ = nullptr;
}
Pickle::Pickle(const Pickle& other)
: header_(nullptr),
header_size_(other.header_size_),
capacity_after_header_(0),
write_offset_(other.write_offset_) {
if (other.header_) {
Resize(other.header_->payload_size);
memcpy(header_, other.header_, header_size_ + other.header_->payload_size);
}
}
Pickle::~Pickle() {
if (capacity_after_header_ != kCapacityReadOnly)
free(header_);
}
Pickle& Pickle::operator=(const Pickle& other) {
if (this == &other) {
return *this;
}
if (capacity_after_header_ == kCapacityReadOnly) {
header_ = nullptr;
capacity_after_header_ = 0;
}
if (header_size_ != other.header_size_) {
free(header_);
header_ = nullptr;
header_size_ = other.header_size_;
}
if (other.header_) {
Resize(other.header_->payload_size);
memcpy(header_, other.header_,
other.header_size_ + other.header_->payload_size);
write_offset_ = other.write_offset_;
}
return *this;
}
void Pickle::WriteString(const StringPiece& value) {
WriteData(value.data(), value.size());
}
void Pickle::WriteString16(const StringPiece16& value) {
WriteInt(checked_cast<int>(value.size()));
WriteBytes(value.data(), value.size() * sizeof(char16_t));
}
void Pickle::WriteData(const char* data, size_t length) {
WriteInt(checked_cast<int>(length));
WriteBytes(data, length);
}
void Pickle::WriteBytes(const void* data, size_t length) {
WriteBytesCommon(data, length);
}
void Pickle::Reserve(size_t length) {
size_t data_len = bits::AlignUp(length, sizeof(uint32_t));
DCHECK_GE(data_len, length);
#ifdef ARCH_CPU_64_BITS
DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
#endif
DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
size_t new_size = write_offset_ + data_len;
if (new_size > capacity_after_header_)
Resize(capacity_after_header_ * 2 + new_size);
}
bool Pickle::WriteAttachment(scoped_refptr<Attachment> attachment) {
return false;
}
bool Pickle::ReadAttachment(gurl_base::PickleIterator* iter,
scoped_refptr<Attachment>* attachment) const {
return false;
}
bool Pickle::HasAttachments() const {
return false;
}
void Pickle::Resize(size_t new_capacity) {
CHECK_NE(capacity_after_header_, kCapacityReadOnly);
capacity_after_header_ = bits::AlignUp(new_capacity, kPayloadUnit);
void* p = realloc(header_, GetTotalAllocatedSize());
CHECK(p);
header_ = reinterpret_cast<Header*>(p);
}
void* Pickle::ClaimBytes(size_t num_bytes) {
void* p = ClaimUninitializedBytesInternal(num_bytes);
CHECK(p);
memset(p, 0, num_bytes);
return p;
}
size_t Pickle::GetTotalAllocatedSize() const {
if (capacity_after_header_ == kCapacityReadOnly)
return 0;
return header_size_ + capacity_after_header_;
}
// static
const char* Pickle::FindNext(size_t header_size,
const char* start,
const char* end) {
size_t pickle_size = 0;
if (!PeekNext(header_size, start, end, &pickle_size))
return nullptr;
if (pickle_size > static_cast<size_t>(end - start))
return nullptr;
return start + pickle_size;
}
// static
bool Pickle::PeekNext(size_t header_size,
const char* start,
const char* end,
size_t* pickle_size) {
DCHECK_EQ(header_size, bits::AlignUp(header_size, sizeof(uint32_t)));
DCHECK_GE(header_size, sizeof(Header));
DCHECK_LE(header_size, static_cast<size_t>(kPayloadUnit));
size_t length = static_cast<size_t>(end - start);
if (length < sizeof(Header))
return false;
const Header* hdr = reinterpret_cast<const Header*>(start);
if (length < header_size)
return false;
// If payload_size causes an overflow, we return maximum possible
// pickle size to indicate that.
*pickle_size = ClampAdd(header_size, hdr->payload_size);
return true;
}
template <size_t length>
void Pickle::WriteBytesStatic(const void* data) {
WriteBytesCommon(data, length);
}
template void Pickle::WriteBytesStatic<2>(const void* data);
template void Pickle::WriteBytesStatic<4>(const void* data);
template void Pickle::WriteBytesStatic<8>(const void* data);
inline void* Pickle::ClaimUninitializedBytesInternal(size_t length) {
DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
<< "oops: pickle is readonly";
size_t data_len = bits::AlignUp(length, sizeof(uint32_t));
DCHECK_GE(data_len, length);
#ifdef ARCH_CPU_64_BITS
DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
#endif
DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
size_t new_size = write_offset_ + data_len;
if (new_size > capacity_after_header_) {
size_t new_capacity = capacity_after_header_ * 2;
const size_t kPickleHeapAlign = 4096;
if (new_capacity > kPickleHeapAlign) {
new_capacity =
bits::AlignUp(new_capacity, kPickleHeapAlign) - kPayloadUnit;
}
Resize(std::max(new_capacity, new_size));
}
char* write = mutable_payload() + write_offset_;
std::fill(write + length, write + data_len, 0); // Always initialize padding
header_->payload_size = static_cast<uint32_t>(new_size);
write_offset_ = new_size;
return write;
}
inline void Pickle::WriteBytesCommon(const void* data, size_t length) {
DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
<< "oops: pickle is readonly";
MSAN_CHECK_MEM_IS_INITIALIZED(data, length);
void* write = ClaimUninitializedBytesInternal(length);
std::copy(static_cast<const char*>(data),
static_cast<const char*>(data) + length, static_cast<char*>(write));
}
} // namespace gurl_base
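A minimal round-trip sketch (illustrative, not part of the committed diff) using only the public API implemented above: values are appended to a Pickle and read back in the same order through a PickleIterator.

#include <string>
#include "base/pickle.h"

bool PickleRoundTripSketch() {
  gurl_base::Pickle pickle;
  pickle.WriteInt(42);
  pickle.WriteString("hello");

  // Re-parse the serialized bytes through the read-only constructor.
  gurl_base::Pickle view(pickle.data_as_char(), pickle.size());
  gurl_base::PickleIterator it(view);
  int number = 0;
  std::string text;
  return it.ReadInt(&number) && it.ReadString(&text) && number == 42 && text == "hello";
}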


@@ -0,0 +1,352 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_PICKLE_H_
#define BASE_PICKLE_H_
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string>
#include "base/base_export.h"
#include "base/check_op.h"
#include "base/containers/span.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/ref_counted.h"
#include "base/strings/string_piece.h"
namespace gurl_base {
class Pickle;
// PickleIterator reads data from a Pickle. The Pickle object must remain valid
// while the PickleIterator object is in use.
class BASE_EXPORT PickleIterator {
public:
PickleIterator() : payload_(nullptr), read_index_(0), end_index_(0) {}
explicit PickleIterator(const Pickle& pickle);
// Methods for reading the payload of the Pickle. To read from the start of
// the Pickle, create a PickleIterator from a Pickle. If successful, these
// methods return true. Otherwise, false is returned to indicate that the
// result could not be extracted. It is not possible to read from the iterator
// after that.
[[nodiscard]] bool ReadBool(bool* result);
[[nodiscard]] bool ReadInt(int* result);
[[nodiscard]] bool ReadLong(long* result);
[[nodiscard]] bool ReadUInt16(uint16_t* result);
[[nodiscard]] bool ReadUInt32(uint32_t* result);
[[nodiscard]] bool ReadInt64(int64_t* result);
[[nodiscard]] bool ReadUInt64(uint64_t* result);
[[nodiscard]] bool ReadFloat(float* result);
[[nodiscard]] bool ReadDouble(double* result);
[[nodiscard]] bool ReadString(std::string* result);
// The StringPiece data will only be valid for the lifetime of the message.
[[nodiscard]] bool ReadStringPiece(StringPiece* result);
[[nodiscard]] bool ReadString16(std::u16string* result);
// The StringPiece16 data will only be valid for the lifetime of the message.
[[nodiscard]] bool ReadStringPiece16(StringPiece16* result);
// A pointer to the data will be placed in |*data|, and the length will be
// placed in |*length|. The pointer placed into |*data| points into the
// message's buffer so it will be scoped to the lifetime of the message (or
// until the message data is mutated). Do not keep the pointer around!
[[nodiscard]] bool ReadData(const char** data, size_t* length);
// Similar, but using gurl_base::span for convenience.
[[nodiscard]] std::optional<gurl_base::span<const uint8_t>> ReadData();
// A pointer to the data will be placed in |*data|. The caller specifies the
// number of bytes to read, and ReadBytes will validate this length. The
// pointer placed into |*data| points into the message's buffer so it will be
// scoped to the lifetime of the message (or until the message data is
// mutated). Do not keep the pointer around!
[[nodiscard]] bool ReadBytes(const char** data, size_t length);
// A version of ReadInt() that checks for the result not being negative. Use
// it for reading the object sizes.
[[nodiscard]] bool ReadLength(size_t* result) {
int result_int;
if (!ReadInt(&result_int) || result_int < 0)
return false;
*result = static_cast<size_t>(result_int);
return true;
}
// Skips bytes in the read buffer and returns true if there are at least
// num_bytes available. Otherwise, does nothing and returns false.
[[nodiscard]] bool SkipBytes(size_t num_bytes) {
return !!GetReadPointerAndAdvance(num_bytes);
}
bool ReachedEnd() const { return read_index_ == end_index_; }
private:
// Read Type from Pickle.
template <typename Type>
bool ReadBuiltinType(Type* result);
// Advance read_index_ but do not allow it to exceed end_index_.
// Keeps read_index_ aligned.
void Advance(size_t size);
// Get read pointer for Type and advance read pointer.
template<typename Type>
const char* GetReadPointerAndAdvance();
// Get read pointer for |num_bytes| and advance read pointer. This method
// checks num_bytes for wrapping.
const char* GetReadPointerAndAdvance(size_t num_bytes);
// Get read pointer for (num_elements * size_element) bytes and advance read
// pointer. This method checks for overflow and wrapping.
const char* GetReadPointerAndAdvance(size_t num_elements,
size_t size_element);
const char* payload_; // Start of our pickle's payload.
size_t read_index_; // Offset of the next readable byte in payload.
size_t end_index_; // Payload size.
};
// This class provides facilities for basic binary value packing and unpacking.
//
// The Pickle class supports appending primitive values (ints, strings, etc.)
// to a pickle instance. The Pickle instance grows its internal memory buffer
// dynamically to hold the sequence of primitive values. The internal memory
// buffer is exposed as the "data" of the Pickle. This "data" can be passed
// to a Pickle object to initialize it for reading.
//
// When reading from a Pickle object, it is important for the consumer to know
// what value types to read and in what order to read them as the Pickle does
// not keep track of the type of data written to it.
//
// The Pickle's data has a header which contains the size of the Pickle's
// payload. It can optionally support additional space in the header. That
// space is controlled by the header_size parameter passed to the Pickle
// constructor.
//
class BASE_EXPORT Pickle {
public:
// Auxiliary data attached to a Pickle. Pickle must be subclassed along with
// this interface in order to provide a concrete implementation of support
// for attachments. The base Pickle implementation does not accept
// attachments.
class BASE_EXPORT Attachment : public RefCountedThreadSafe<Attachment> {
public:
Attachment();
Attachment(const Attachment&) = delete;
Attachment& operator=(const Attachment&) = delete;
protected:
friend class RefCountedThreadSafe<Attachment>;
virtual ~Attachment();
};
// Initialize a Pickle object using the default header size.
Pickle();
// Initialize a Pickle object with the specified header size in bytes, which
// must be greater-than-or-equal-to sizeof(Pickle::Header). The header size
// will be rounded up to ensure that the header size is 32bit-aligned.
explicit Pickle(size_t header_size);
// Initializes a Pickle from a const block of data. The data is not copied;
// instead the data is merely referenced by this Pickle. Only const methods
// should be used on the Pickle when initialized this way. The header
// padding size is deduced from the data length.
explicit Pickle(span<const uint8_t> data);
// TODO(crbug.com/1490484): Migrate callers of this overload to the span
// version.
Pickle(const char* data, size_t data_len);
// Initializes a Pickle as a deep copy of another Pickle.
Pickle(const Pickle& other);
// Note: Other classes are derived from this class, and they may well
// delete through this parent class, e.g. std::unique_ptr<Pickle> exists
// in several places in the code.
virtual ~Pickle();
// Performs a deep copy.
Pickle& operator=(const Pickle& other);
// Returns the number of bytes written in the Pickle, including the header.
size_t size() const {
return header_ ? header_size_ + header_->payload_size : 0;
}
// Returns the data for this Pickle.
const uint8_t* data() const {
return reinterpret_cast<const uint8_t*>(header_);
}
// Handy method to simplify calling data() with a reinterpret_cast.
const char* data_as_char() const {
return reinterpret_cast<const char*>(data());
}
// Returns the effective memory capacity of this Pickle, that is, the total
// number of bytes currently dynamically allocated or 0 in the case of a
// read-only Pickle. This should be used only for diagnostic / profiling
// purposes.
size_t GetTotalAllocatedSize() const;
// Methods for adding to the payload of the Pickle. These values are
// appended to the end of the Pickle's payload. When reading values from a
// Pickle, it is important to read them in the order in which they were added
// to the Pickle.
void WriteBool(bool value) { WriteInt(value ? 1 : 0); }
void WriteInt(int value) { WritePOD(value); }
void WriteLong(long value) {
// Always write long as a 64-bit value to ensure compatibility between
// 32-bit and 64-bit processes.
WritePOD(static_cast<int64_t>(value));
}
void WriteUInt16(uint16_t value) { WritePOD(value); }
void WriteUInt32(uint32_t value) { WritePOD(value); }
void WriteInt64(int64_t value) { WritePOD(value); }
void WriteUInt64(uint64_t value) { WritePOD(value); }
void WriteFloat(float value) { WritePOD(value); }
void WriteDouble(double value) { WritePOD(value); }
void WriteString(const StringPiece& value);
void WriteString16(const StringPiece16& value);
// "Data" is a blob with a length. When you read it out you will be given the
// length. See also WriteBytes.
void WriteData(const char* data, size_t length);
// "Bytes" is a blob with no length. The caller must specify the length both
// when reading and writing. It is normally used to serialize PoD types of a
// known size. See also WriteData.
void WriteBytes(const void* data, size_t length);
// WriteAttachment appends |attachment| to the pickle. It returns
// false iff the set is full or if the Pickle implementation does not support
// attachments.
virtual bool WriteAttachment(scoped_refptr<Attachment> attachment);
// ReadAttachment parses an attachment given the parsing state |iter| and
// writes it to |*attachment|. It returns true on success.
virtual bool ReadAttachment(gurl_base::PickleIterator* iter,
scoped_refptr<Attachment>* attachment) const;
// Indicates whether the pickle has any attachments.
virtual bool HasAttachments() const;
// Reserves space for upcoming writes when multiple writes will be made and
// their sizes are computed in advance. It can be significantly faster to call
// Reserve() before calling WriteFoo() multiple times.
void Reserve(size_t additional_capacity);
// Payload follows after allocation of Header (header size is customizable).
struct Header {
uint32_t payload_size; // Specifies the size of the payload.
};
// Returns the header, cast to a user-specified type T. The type T must be a
// subclass of Header and its size must correspond to the header_size passed
// to the Pickle constructor.
template <class T>
T* headerT() {
DCHECK_EQ(header_size_, sizeof(T));
return static_cast<T*>(header_);
}
template <class T>
const T* headerT() const {
DCHECK_EQ(header_size_, sizeof(T));
return static_cast<const T*>(header_);
}
// The payload is the pickle data immediately following the header.
size_t payload_size() const {
return header_ ? header_->payload_size : 0;
}
const char* payload() const {
return reinterpret_cast<const char*>(header_) + header_size_;
}
// Returns the address of the byte immediately following the currently valid
// header + payload.
const char* end_of_payload() const {
// This object may be invalid.
return header_ ? payload() + payload_size() : NULL;
}
protected:
// Returns the size of the header, which can be the default value, set by the
// user, or calculated from the passed raw data.
size_t header_size() const { return header_size_; }
char* mutable_payload() {
return reinterpret_cast<char*>(header_) + header_size_;
}
size_t capacity_after_header() const {
return capacity_after_header_;
}
// Resizes the capacity; note that the input value should not include the size
// of the header.
void Resize(size_t new_capacity);
// Claims |num_bytes| bytes of payload. This is similar to Reserve() in that
// it may grow the capacity, but it also advances the write offset of the
// pickle by |num_bytes|. Claimed memory, including padding, is zeroed.
//
// Returns the address of the first byte claimed.
void* ClaimBytes(size_t num_bytes);
// Find the end of the pickled data that starts at range_start. Returns NULL
// if the entire Pickle is not found in the given data range.
static const char* FindNext(size_t header_size,
const char* range_start,
const char* range_end);
// Parse pickle header and return total size of the pickle. Data range
// doesn't need to contain entire pickle.
// Returns true if pickle header was found and parsed. Callers must check
// returned |pickle_size| for sanity (against maximum message size, etc).
// NOTE: when function successfully parses a header, but encounters an
// overflow during pickle size calculation, it sets |pickle_size| to the
// maximum size_t value and returns true.
static bool PeekNext(size_t header_size,
const char* range_start,
const char* range_end,
size_t* pickle_size);
// The allocation granularity of the payload.
static const size_t kPayloadUnit;
private:
friend class PickleIterator;
// `header_` is not a raw_ptr<...> for performance reasons (based on analysis
// of sampling profiler data).
RAW_PTR_EXCLUSION Header* header_;
size_t header_size_; // Supports extra data between header and payload.
// Allocation size of payload (or -1 if allocation is const). Note: this
// doesn't count the header.
size_t capacity_after_header_;
// The offset at which we will write the next field. Note: this doesn't count
// the header.
size_t write_offset_;
// Just like WriteBytes, but with a compile-time size, for performance.
template<size_t length> void BASE_EXPORT WriteBytesStatic(const void* data);
// Writes a POD by copying its bytes.
template <typename T> bool WritePOD(const T& data) {
WriteBytesStatic<sizeof(data)>(&data);
return true;
}
inline void* ClaimUninitializedBytesInternal(size_t num_bytes);
inline void WriteBytesCommon(const void* data, size_t length);
};
} // namespace gurl_base
#endif // BASE_PICKLE_H_
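A sketch tying the two new pieces together (illustrative, not part of the committed diff): PickledIOBuffer from net/io_buffer.hpp exposes a Pickle's bytes through the IOBuffer interface so they can be handed to an IOBuffer-based write path without an extra copy. WriteSome() is again a hypothetical stand-in for such a write function.

#include "net/io_buffer.hpp"

// Hypothetical: writes up to `len` bytes from `buf` and returns how many were written.
int WriteSome(net::IOBuffer* buf, int len);

void PickledWriteSketch() {
  auto buf = gurl_base::MakeRefCounted<net::PickledIOBuffer>();
  buf->pickle()->WriteInt(42);
  buf->pickle()->WriteString("hello");
  buf->Done();                        // data()/size() now point at the pickle's storage
  WriteSome(buf.get(), buf->size());
}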