0.4.0-pre.14

This is a special version (since the repository hasn't been updated for a while). It includes partial updates from 0.3 to 0.4, along with several fixes for 0.4.0-pre.13.

这是一个特殊版本(因为一段时间没有更新存储库),它包含0.3至0.4的部分更新以及对0.4.0-pre.13的几处修复。
This commit is contained in:
wisdgod
2025-12-23 11:18:28 +08:00
parent 280c2d71a8
commit 65a390d4f2
428 changed files with 66005 additions and 15324 deletions

View File

@@ -1,165 +0,0 @@
#![cfg(all(unix, feature = "clock", feature = "std"))]
use std::{path, process, thread};
#[cfg(target_os = "linux")]
use chrono::Days;
use chrono::{Datelike, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Timelike};
/// Spawn the system `date` binary for the given year/month/day/hour (at minute
/// 5, second 1) and check that chrono's `Local` mapping agrees with its output.
fn verify_against_date_command_local(path: &'static str, dt: NaiveDateTime) {
    let expected = {
        let out = process::Command::new(path)
            .arg("-d")
            .arg(format!("{}-{:02}-{:02} {:02}:05:01", dt.year(), dt.month(), dt.day(), dt.hour()))
            .arg("+%Y-%m-%d %H:%M:%S %:z")
            .output()
            .unwrap();
        String::from_utf8(out.stdout).unwrap()
    };
    // Ideally we would compare against `Local.with_ymd_and_hms(..).earliest()`
    // alone, but neither `earliest()` nor `latest()` consistently matches what
    // `date` prints for ambiguous local times (DST fall-back), so until a
    // decision is made on whether `date` output must be matched exactly we
    // accept either candidate of an ambiguous mapping.
    let naive = NaiveDate::from_ymd_opt(dt.year(), dt.month(), dt.day())
        .unwrap()
        .and_hms_opt(dt.hour(), 5, 1)
        .unwrap();
    match Local.from_local_datetime(&naive) {
        chrono::MappedLocalTime::Single(only) => {
            assert_eq!(format!("{}\n", only), expected);
        }
        chrono::MappedLocalTime::Ambiguous(first, second) => {
            assert!(format!("{}\n", first) == expected || format!("{}\n", second) == expected)
        }
        // A DST "spring forward" gap: `date` prints nothing for such times.
        chrono::MappedLocalTime::None => {
            assert_eq!("", expected);
        }
    }
}
/// path to Unix `date` command. Should work on most Linux and Unixes. Not the
/// path for MacOS (/bin/date) which uses a different version of `date` with
/// different arguments (so it won't run which is okay).
/// for testing only
#[allow(dead_code)]
#[cfg(not(target_os = "aix"))]
const DATE_PATH: &str = "/usr/bin/date";
// On AIX a different path is used — presumably a GNU build of `date` installed
// under /opt/freeware, since the system `date` takes different arguments; TODO
// confirm against an actual AIX environment.
#[allow(dead_code)]
#[cfg(target_os = "aix")]
const DATE_PATH: &str = "/opt/freeware/bin/date";
#[cfg(test)]
/// Test helper: sanity check that the `date` command at `DATE_PATH` behaves as
/// expected by running `date --version`; asserts the command succeeded.
fn assert_run_date_version() {
    // Record the `LANG` environment variable for debugging, since the locale
    // can influence `date` output.
    match std::env::var_os("LANG") {
        Some(lang) => eprintln!("LANG: {:?}", lang),
        None => eprintln!("LANG not set"),
    }
    let result = process::Command::new(DATE_PATH).arg("--version").output().unwrap();
    let captured_out = String::from_utf8(result.stdout).unwrap();
    let captured_err = String::from_utf8(result.stderr).unwrap();
    // Log the binary's reported version so test failures are easier to diagnose.
    eprintln!(
        "command: {:?} --version\nstdout: {:?}\nstderr: {:?}",
        DATE_PATH, captured_out, captured_err
    );
    assert!(result.status.success(), "command failed: {:?} --version", DATE_PATH);
}
#[test]
/// Compare chrono's `Local` against the system `date` command for every hour
/// of a sample of years (around the Unix epoch, the present, and far future).
/// Skips (without failing) when the `date` binary is not installed.
fn try_verify_against_date_command() {
    if !path::Path::new(DATE_PATH).exists() {
        eprintln!("date command {:?} not found, skipping", DATE_PATH);
        return;
    }
    assert_run_date_version();
    eprintln!(
        "Run command {:?} for every hour from 1975 to 2077, skipping some years...",
        DATE_PATH,
    );
    let mut children = vec![];
    // Iterate the years by value and `move` each one into its thread's closure.
    // Using `.iter()` with a non-`move` closure borrows the loop variable,
    // which cannot satisfy `thread::spawn`'s `'static` bound (E0373).
    for year in [1975, 1976, 1977, 2020, 2021, 2022, 2073, 2074, 2075, 2076, 2077] {
        children.push(thread::spawn(move || {
            let mut date = NaiveDate::from_ymd_opt(year, 1, 1).unwrap().and_time(NaiveTime::MIN);
            let end = NaiveDate::from_ymd_opt(year + 1, 1, 1).unwrap().and_time(NaiveTime::MIN);
            while date <= end {
                verify_against_date_command_local(DATE_PATH, date);
                date += chrono::TimeDelta::try_hours(1).unwrap();
            }
        }));
    }
    for child in children {
        // Propagate panics from worker threads: silently discarding the join
        // result would let failed assertions in a child pass the test.
        child.join().expect("verification thread panicked");
    }
}
#[cfg(target_os = "linux")]
/// Format the given date/time with a broad set of strftime specifiers via both
/// the system `date` command (under the C locale) and chrono, and assert the
/// two renderings are identical.
fn verify_against_date_command_format_local(path: &'static str, dt: NaiveDateTime) {
    // Specifiers deliberately left out of the format string:
    // a, A, b, B, h, c, p, r — depend on localization;
    // x — fails: date prints dd/mm/yyyy while chrono prints dd/mm/yy (same as %D);
    // Z — too many ways to represent it, will most likely fail.
    let required_format =
        "d%d D%D F%F H%H I%I j%j k%k l%l m%m M%M q%q S%S T%T u%u U%U w%w W%W X%X y%y Y%Y z%:z";
    let stamp = format!(
        "{}-{:02}-{:02} {:02}:{:02}:{:02}",
        dt.year(),
        dt.month(),
        dt.day(),
        dt.hour(),
        dt.minute(),
        dt.second()
    );
    // Force the C locale so the command output is deterministic.
    let output = process::Command::new(path)
        .env("LANG", "c")
        .env("LC_ALL", "c")
        .arg("-d")
        .arg(stamp)
        .arg(format!("+{}", required_format))
        .output()
        .unwrap();
    let command_rendering = String::from_utf8(output.stdout).unwrap();
    let local = Local
        .from_local_datetime(
            &NaiveDate::from_ymd_opt(dt.year(), dt.month(), dt.day())
                .unwrap()
                .and_hms_opt(dt.hour(), dt.minute(), dt.second())
                .unwrap(),
        )
        .unwrap();
    let chrono_rendering = format!("{}\n", local.format(required_format));
    assert_eq!(command_rendering, chrono_rendering);
}
#[test]
#[cfg(target_os = "linux")]
/// Sample dates every 55 days from 1970 to 2008 (covering varied months and
/// weekdays) and verify chrono's formatting against the system `date` command.
/// Skips (without failing) when the `date` binary is not installed.
fn try_verify_against_date_command_format() {
    if !path::Path::new(DATE_PATH).exists() {
        eprintln!("date command {:?} not found, skipping", DATE_PATH);
        return;
    }
    assert_run_date_version();
    let mut current =
        NaiveDate::from_ymd_opt(1970, 1, 1).unwrap().and_hms_opt(12, 11, 13).unwrap();
    while current.year() < 2008 {
        verify_against_date_command_format_local(DATE_PATH, current);
        current = current + Days::new(55);
    }
}

View File

@@ -1,89 +0,0 @@
//! Run this test with:
//! `env TZ="$(date +%z)" NOW="$(date +%s)" wasm-pack test --node -- --features wasmbind`
//!
//! The `TZ` and `NOW` variables are used to compare the results inside the WASM environment with
//! the host system.
//! The check will fail if the local timezone does not match one of the timezones defined below.
#![cfg(all(
target_arch = "wasm32",
feature = "wasmbind",
feature = "clock",
not(any(target_os = "emscripten", target_os = "wasi"))
))]
use chrono::prelude::*;
use wasm_bindgen_test::*;
#[wasm_bindgen_test]
fn now() {
let utc: DateTime<Utc> = Utc::now();
let local: DateTime<Local> = Local::now();
// Ensure time set by the test script is correct
let now = env!("NOW");
let actual = NaiveDateTime::parse_from_str(&now, "%s").unwrap().and_utc();
let diff = utc - actual;
assert!(
diff < chrono::TimeDelta::try_minutes(5).unwrap(),
"expected {} - {} == {} < 5m (env var: {})",
utc,
actual,
diff,
now,
);
let tz = env!("TZ");
eprintln!("testing with tz={}", tz);
// Ensure offset retrieved when getting local time is correct
let expected_offset = match tz {
"ACST-9:30" => FixedOffset::east_opt(19 * 30 * 60).unwrap(),
"Asia/Katmandu" => FixedOffset::east_opt(23 * 15 * 60).unwrap(), // No DST thankfully
"EDT" | "EST4" | "-0400" => FixedOffset::east_opt(-4 * 60 * 60).unwrap(),
"EST" | "-0500" => FixedOffset::east_opt(-5 * 60 * 60).unwrap(),
"UTC0" | "+0000" => FixedOffset::east_opt(0).unwrap(),
tz => panic!("unexpected TZ {}", tz),
};
assert_eq!(
&expected_offset,
local.offset(),
"expected: {:?} local: {:?}",
expected_offset,
local.offset(),
);
}
#[wasm_bindgen_test]
/// Converting a JS `Date` into `DateTime<Utc>` must preserve the exact
/// millisecond timestamp.
fn from_is_exact() {
    let js_now = js_sys::Date::new_0();
    let converted = DateTime::<Utc>::from(js_now.clone());
    assert_eq!(js_now.get_time() as i64, converted.timestamp_millis());
}
#[wasm_bindgen_test]
/// Round-trip: the current local time, stripped to a naive datetime and mapped
/// back through `Local`, must resolve to a single value equal to the original.
fn local_from_local_datetime() {
    let now = Local::now();
    let roundtripped = Local
        .from_local_datetime(&now.naive_local())
        .single()
        .unwrap_or_else(|| panic!("Required for test!"));
    assert_eq!(now, roundtripped);
}
#[wasm_bindgen_test]
/// Converting a `DateTime<Utc>` into a JS `Date` must carry over every field,
/// including sub-second (millisecond) precision.
fn convert_all_parts_with_milliseconds() {
    let time: DateTime<Utc> = "2020-12-01T03:01:55.974Z".parse().unwrap();
    let js_date = js_sys::Date::from(time);

    assert_eq!(js_date.get_utc_full_year(), 2020);
    assert_eq!(js_date.get_utc_month(), 11); // months are numbered 0..=11
    assert_eq!(js_date.get_utc_date(), 1);
    assert_eq!(js_date.get_utc_hours(), 3);
    assert_eq!(js_date.get_utc_minutes(), 1);
    assert_eq!(js_date.get_utc_seconds(), 55);
    assert_eq!(js_date.get_utc_milliseconds(), 974);
}

View File

@@ -1,28 +0,0 @@
#![cfg(all(windows, feature = "clock", feature = "std"))]
use std::fs;
use windows_bindgen::bindgen;
#[test]
/// Regenerate the Windows API bindings and assert the checked-in file is
/// up to date. Fails with a readable diff when `win_bindings.rs` is stale.
fn gen_bindings() {
    let input = "src/offset/local/win_bindings.txt";
    let output = "src/offset/local/win_bindings.rs";
    let existing = fs::read_to_string(output).unwrap();
    bindgen(["--no-deps", "--etc", input]).unwrap();
    // Check the output is the same as before.
    // Depending on the git configuration the file may have been checked out with `\r\n` newlines
    // or with `\n`. Normalize the freshly generated file to the existing file's line endings so
    // the comparison ignores that difference.
    let mut new = fs::read_to_string(output).unwrap();
    if existing.contains("\r\n") && !new.contains("\r\n") {
        new = new.replace("\n", "\r\n");
    } else if !existing.contains("\r\n") && new.contains("\r\n") {
        new = new.replace("\r\n", "\n");
    }
    // `similar_asserts` panics with a line diff on mismatch. The previous extra
    // line-by-line check after this assertion was dead code: once the assertion
    // passes, the strings — and therefore their lines — are already equal.
    similar_asserts::assert_eq!(existing, new);
}

View File

@@ -1,6 +1,6 @@
[package]
name = "chrono"
version = "0.4.41"
version = "0.4.42"
description = "Date and time library for Rust"
homepage = "https://github.com/chronotope/chrono"
documentation = "https://docs.rs/chrono/"
@@ -25,14 +25,16 @@ winapi = ["windows-link"]
std = ["alloc"]
clock = ["winapi", "iana-time-zone", "now"]
now = ["std"]
core-error = []
oldtime = []
wasmbind = ["wasm-bindgen", "js-sys"]
unstable-locales = ["pure-rust-locales"]
# Note that rkyv-16, rkyv-32, and rkyv-64 are mutually exclusive.
rkyv = ["dep:rkyv", "rkyv/pointer_width_32"]
rkyv-16 = ["dep:rkyv", "rkyv?/pointer_width_16"]
rkyv-32 = ["dep:rkyv", "rkyv?/pointer_width_32"]
rkyv-64 = ["dep:rkyv", "rkyv?/pointer_width_64"]
rkyv-validation = ["rkyv?/bytecheck"]
rkyv-validation = ["rkyv?/validation"]
# Features for internal use only:
__internal_bench = []
@@ -40,7 +42,7 @@ __internal_bench = []
num-traits = { version = "0.2", default-features = false }
serde = { version = "1.0.99", default-features = false, optional = true }
pure-rust-locales = { version = "0.8", optional = true }
rkyv = { version = "0.8.10", optional = true, default-features = false, features = ["std"]}
rkyv = { version = "0.8", optional = true, default-features = false }
arbitrary = { version = "1.0.0", features = ["derive"], optional = true }
[target.'cfg(all(target_arch = "wasm32", not(any(target_os = "emscripten", target_os = "wasi"))))'.dependencies]
@@ -48,10 +50,10 @@ wasm-bindgen = { version = "0.2", optional = true }
js-sys = { version = "0.3", optional = true } # contains FFI bindings for the JS Date API
[target.'cfg(windows)'.dependencies]
windows-link = { version = "0.1", optional = true }
windows-link = { version = "0.2", optional = true }
[target.'cfg(windows)'.dev-dependencies]
windows-bindgen = { version = "0.62" } # MSRV is 1.74
windows-bindgen = { version = "0.63" } # MSRV is 1.74
[target.'cfg(unix)'.dependencies]
iana-time-zone = { version = "0.1.45", optional = true, features = ["fallback"] }

View File

@@ -10,8 +10,8 @@ use core::cmp::Ordering;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::{fmt, hash};
// #[cfg(feature = "rkyv")]
// use rkyv::{Archive, Deserialize, Serialize};
#[cfg(feature = "rkyv")]
use rkyv::{Archive, Deserialize, Serialize};
#[cfg(all(feature = "unstable-locales", feature = "alloc"))]
use crate::format::Locale;
@@ -54,7 +54,7 @@ use crate::{DateTime, Datelike, TimeDelta, Weekday};
/// even though the raw calculation between `NaiveDate` and `TimeDelta` may not.
#[deprecated(since = "0.4.23", note = "Use `NaiveDate` or `DateTime<Tz>` instead")]
#[derive(Clone)]
// #[cfg_attr(feature = "rkyv", derive(Archive, Deserialize, Serialize))]
#[cfg_attr(feature = "rkyv", derive(Archive, Deserialize, Serialize))]
pub struct Date<Tz: TimeZone> {
date: NaiveDate,
offset: Tz::Offset,

View File

@@ -31,7 +31,7 @@ use crate::offset::{FixedOffset, LocalResult, Offset, TimeZone, Utc};
use crate::{Datelike, Months, TimeDelta, Timelike, Weekday};
use crate::{expect, try_opt};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
/// documented at re-export site
@@ -48,7 +48,7 @@ mod tests;
/// [`TimeZone`](./offset/trait.TimeZone.html) implementations.
#[derive(Clone)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd))
)]
@@ -713,6 +713,61 @@ impl<Tz: TimeZone> DateTime<Tz> {
}
impl DateTime<Utc> {
/// Makes a new `DateTime<Utc>` from the number of non-leap seconds
/// since January 1, 1970 0:00:00 UTC (aka "UNIX timestamp").
///
/// This is a convenience wrapper around [`DateTime::from_timestamp`],
/// which is useful in functions like [`Iterator::map`] to avoid a closure.
///
/// This is guaranteed to round-trip with regard to [`timestamp`](DateTime::timestamp).
///
/// If you need to create a `DateTime` with a [`TimeZone`] different from [`Utc`], use
/// [`TimeZone::timestamp_opt`] or [`DateTime::with_timezone`]; if you need to create a
/// `DateTime` with more precision, use [`DateTime::from_timestamp_micros`],
/// [`DateTime::from_timestamp_millis`], or [`DateTime::from_timestamp_nanos`].
///
/// # Errors
///
/// Returns `None` on out-of-range number of seconds,
/// otherwise returns `Some(DateTime {...})`.
///
/// # Examples
///
/// Using [`Option::and_then`]:
///
/// ```
/// # use chrono::DateTime;
/// let maybe_timestamp: Option<i64> = Some(1431648000);
/// let maybe_dt = maybe_timestamp.and_then(DateTime::from_timestamp_secs);
///
/// assert!(maybe_dt.is_some());
/// assert_eq!(maybe_dt.unwrap().to_string(), "2015-05-15 00:00:00 UTC");
/// ```
///
/// Using [`Iterator::map`]:
///
/// ```
/// # use chrono::{DateTime, Utc};
/// let v = vec![i64::MIN, 1_000_000_000, 1_234_567_890, i64::MAX];
/// let timestamps: Vec<Option<DateTime<Utc>>> = v
/// .into_iter()
/// .map(DateTime::from_timestamp_secs)
/// .collect();
///
/// assert_eq!(vec![
/// None,
/// Some(DateTime::parse_from_rfc3339("2001-09-09 01:46:40Z").unwrap().to_utc()),
/// Some(DateTime::parse_from_rfc3339("2009-02-13 23:31:30Z").unwrap().to_utc()),
/// None,
/// ], timestamps);
/// ```
///
#[inline]
#[must_use]
pub const fn from_timestamp_secs(secs: i64) -> Option<Self> {
Self::from_timestamp(secs, 0)
}
/// Makes a new `DateTime<Utc>` from the number of non-leap seconds
/// since January 1, 1970 0:00:00 UTC (aka "UNIX timestamp")
/// and the number of nanoseconds since the last whole non-leap second.
@@ -1930,12 +1985,12 @@ where
}
}
/// Number of days between Januari 1, 1970 and December 31, 1 BCE which we define to be day 0.
/// Number of days between January 1, 1970 and December 31, 1 BCE which we define to be day 0.
/// 4 full leap year cycles until December 31, 1600 4 * 146097 = 584388
/// 1 day until January 1, 1601 1
/// 369 years until Januari 1, 1970 369 * 365 = 134685
/// 369 years until January 1, 1970 369 * 365 = 134685
/// of which floor(369 / 4) are leap years floor(369 / 4) = 92
/// except for 1700, 1800 and 1900 -3 +
/// --------
/// 719163
const UNIX_EPOCH_DAY: i64 = 719_163;
pub(crate) const UNIX_EPOCH_DAY: i64 = 719_163;

View File

@@ -50,8 +50,8 @@ impl<Tz: TimeZone> ser::Serialize for DateTime<Tz> {
}
}
#[doc(hidden)]
#[derive(Debug)]
#[allow(missing_docs)]
#[allow(missing_debug_implementations)]
pub struct DateTimeVisitor;
impl de::Visitor<'_> for DateTimeVisitor {
@@ -244,11 +244,7 @@ pub mod ts_nanoseconds {
where
E: de::Error,
{
DateTime::from_timestamp(
value.div_euclid(1_000_000_000),
(value.rem_euclid(1_000_000_000)) as u32,
)
.ok_or_else(|| invalid_ts(value))
Ok(DateTime::from_timestamp_nanos(value))
}
/// Deserialize a timestamp in nanoseconds since the epoch
@@ -526,11 +522,7 @@ pub mod ts_microseconds {
where
E: de::Error,
{
DateTime::from_timestamp(
value.div_euclid(1_000_000),
(value.rem_euclid(1_000_000) * 1000) as u32,
)
.ok_or_else(|| invalid_ts(value))
DateTime::from_timestamp_micros(value).ok_or_else(|| invalid_ts(value))
}
/// Deserialize a timestamp in milliseconds since the epoch
@@ -1066,7 +1058,7 @@ pub mod ts_seconds {
where
E: de::Error,
{
DateTime::from_timestamp(value, 0).ok_or_else(|| invalid_ts(value))
DateTime::from_timestamp_secs(value).ok_or_else(|| invalid_ts(value))
}
/// Deserialize a timestamp in seconds since the epoch
@@ -1077,7 +1069,7 @@ pub mod ts_seconds {
if value > i64::MAX as u64 {
Err(invalid_ts(value))
} else {
DateTime::from_timestamp(value as i64, 0).ok_or_else(|| invalid_ts(value))
DateTime::from_timestamp_secs(value as i64).ok_or_else(|| invalid_ts(value))
}
}
}

View File

@@ -154,7 +154,10 @@ fn test_datetime_from_timestamp_millis() {
// that of `from_timestamp_opt`.
let secs_test = [0, 1, 2, 1000, 1234, 12345678, -1, -2, -1000, -12345678];
for secs in secs_test.iter().cloned() {
assert_eq!(DateTime::from_timestamp_millis(secs * 1000), DateTime::from_timestamp(secs, 0));
assert_eq!(
DateTime::from_timestamp_millis(secs * 1000),
DateTime::from_timestamp_secs(secs)
);
}
}
@@ -191,7 +194,7 @@ fn test_datetime_from_timestamp_micros() {
for secs in secs_test.iter().copied() {
assert_eq!(
DateTime::from_timestamp_micros(secs * 1_000_000),
DateTime::from_timestamp(secs, 0)
DateTime::from_timestamp_secs(secs)
);
}
}
@@ -242,24 +245,34 @@ fn test_datetime_from_timestamp_nanos() {
for secs in secs_test.iter().copied() {
assert_eq!(
Some(DateTime::from_timestamp_nanos(secs * 1_000_000_000)),
DateTime::from_timestamp(secs, 0)
DateTime::from_timestamp_secs(secs)
);
}
}
#[test]
fn test_datetime_from_timestamp_secs() {
let valid = [-2208936075, 0, 119731017, 1234567890, 2034061609];
for timestamp_secs in valid.iter().copied() {
let datetime = DateTime::from_timestamp_secs(timestamp_secs).unwrap();
assert_eq!(timestamp_secs, datetime.timestamp());
assert_eq!(DateTime::from_timestamp(timestamp_secs, 0).unwrap(), datetime);
}
}
#[test]
fn test_datetime_from_timestamp() {
let from_timestamp = |secs| DateTime::from_timestamp(secs, 0);
let ymdhms = |y, m, d, h, n, s| {
NaiveDate::from_ymd_opt(y, m, d).unwrap().and_hms_opt(h, n, s).unwrap().and_utc()
};
assert_eq!(from_timestamp(-1), Some(ymdhms(1969, 12, 31, 23, 59, 59)));
assert_eq!(from_timestamp(0), Some(ymdhms(1970, 1, 1, 0, 0, 0)));
assert_eq!(from_timestamp(1), Some(ymdhms(1970, 1, 1, 0, 0, 1)));
assert_eq!(from_timestamp(1_000_000_000), Some(ymdhms(2001, 9, 9, 1, 46, 40)));
assert_eq!(from_timestamp(0x7fffffff), Some(ymdhms(2038, 1, 19, 3, 14, 7)));
assert_eq!(from_timestamp(i64::MIN), None);
assert_eq!(from_timestamp(i64::MAX), None);
assert_eq!(DateTime::from_timestamp_secs(-1), Some(ymdhms(1969, 12, 31, 23, 59, 59)));
assert_eq!(DateTime::from_timestamp_secs(0), Some(ymdhms(1970, 1, 1, 0, 0, 0)));
assert_eq!(DateTime::from_timestamp_secs(1), Some(ymdhms(1970, 1, 1, 0, 0, 1)));
assert_eq!(DateTime::from_timestamp_secs(1_000_000_000), Some(ymdhms(2001, 9, 9, 1, 46, 40)));
assert_eq!(DateTime::from_timestamp_secs(0x7fffffff), Some(ymdhms(2038, 1, 19, 3, 14, 7)));
assert_eq!(DateTime::from_timestamp_secs(i64::MIN), None);
assert_eq!(DateTime::from_timestamp_secs(i64::MAX), None);
}
#[test]
@@ -1034,7 +1047,7 @@ fn test_parse_datetime_utc() {
Ok(d) => d,
Err(e) => panic!("parsing `{s}` has failed: {e}"),
};
let s_ = format!("{:?}", d);
let s_ = format!("{d:?}");
// `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same
let d_ = match s_.parse::<DateTime<Utc>>() {
Ok(d) => d,

View File

@@ -109,7 +109,7 @@ impl<'a, I: Iterator<Item = B> + Clone, B: Borrow<Item<'a>>> DelayedFormat<I> {
/// let mut buffer = String::new();
/// let _ = df.write_to(&mut buffer);
/// ```
pub fn write_to(&self, w: &mut impl Write) -> fmt::Result {
pub fn write_to(&self, w: &mut (impl Write + ?Sized)) -> fmt::Result {
for item in self.items.clone() {
match *item.borrow() {
Item::Literal(s) | Item::Space(s) => w.write_str(s),
@@ -124,14 +124,19 @@ impl<'a, I: Iterator<Item = B> + Clone, B: Borrow<Item<'a>>> DelayedFormat<I> {
}
#[cfg(feature = "alloc")]
fn format_numeric(&self, w: &mut impl Write, spec: &Numeric, pad: Pad) -> fmt::Result {
fn format_numeric(
&self,
w: &mut (impl Write + ?Sized),
spec: &Numeric,
pad: Pad,
) -> fmt::Result {
use self::Numeric::*;
fn write_one(w: &mut impl Write, v: u8) -> fmt::Result {
fn write_one(w: &mut (impl Write + ?Sized), v: u8) -> fmt::Result {
w.write_char((b'0' + v) as char)
}
fn write_two(w: &mut impl Write, v: u8, pad: Pad) -> fmt::Result {
fn write_two(w: &mut (impl Write + ?Sized), v: u8, pad: Pad) -> fmt::Result {
let ones = b'0' + v % 10;
match (v / 10, pad) {
(0, Pad::None) => {}
@@ -142,7 +147,7 @@ impl<'a, I: Iterator<Item = B> + Clone, B: Borrow<Item<'a>>> DelayedFormat<I> {
}
#[inline]
fn write_year(w: &mut impl Write, year: i32, pad: Pad) -> fmt::Result {
fn write_year(w: &mut (impl Write + ?Sized), year: i32, pad: Pad) -> fmt::Result {
if (1000..=9999).contains(&year) {
// fast path
write_hundreds(w, (year / 100) as u8)?;
@@ -153,7 +158,7 @@ impl<'a, I: Iterator<Item = B> + Clone, B: Borrow<Item<'a>>> DelayedFormat<I> {
}
fn write_n(
w: &mut impl Write,
w: &mut (impl Write + ?Sized),
n: usize,
v: i64,
pad: Pad,
@@ -214,7 +219,7 @@ impl<'a, I: Iterator<Item = B> + Clone, B: Borrow<Item<'a>>> DelayedFormat<I> {
}
#[cfg(feature = "alloc")]
fn format_fixed(&self, w: &mut impl Write, spec: &Fixed) -> fmt::Result {
fn format_fixed(&self, w: &mut (impl Write + ?Sized), spec: &Fixed) -> fmt::Result {
use Fixed::*;
use InternalInternal::*;
@@ -387,7 +392,7 @@ pub fn format_item(
#[cfg(any(feature = "alloc", feature = "serde"))]
impl OffsetFormat {
/// Writes an offset from UTC with the format defined by `self`.
fn format(&self, w: &mut impl Write, off: FixedOffset) -> fmt::Result {
fn format(&self, w: &mut (impl Write + ?Sized), off: FixedOffset) -> fmt::Result {
let off = off.local_minus_utc();
if self.allow_zulu && off == 0 {
w.write_char('Z')?;
@@ -495,8 +500,8 @@ pub enum SecondsFormat {
/// Writes the date, time and offset to the string. same as `%Y-%m-%dT%H:%M:%S%.f%:z`
#[inline]
#[cfg(any(feature = "alloc", feature = "serde"))]
pub(crate) fn write_rfc3339(
w: &mut impl Write,
pub fn write_rfc3339(
w: &mut (impl Write + ?Sized),
dt: NaiveDateTime,
off: FixedOffset,
secform: SecondsFormat,
@@ -560,7 +565,7 @@ pub(crate) fn write_rfc3339(
#[cfg(feature = "alloc")]
/// write datetimes like `Tue, 1 Jul 2003 10:52:37 +0200`, same as `%a, %d %b %Y %H:%M:%S %z`
pub(crate) fn write_rfc2822(
w: &mut impl Write,
w: &mut (impl Write + ?Sized),
dt: NaiveDateTime,
off: FixedOffset,
) -> fmt::Result {
@@ -605,7 +610,7 @@ pub(crate) fn write_rfc2822(
}
/// Equivalent to `{:02}` formatting for n < 100.
pub(crate) fn write_hundreds(w: &mut impl Write, n: u8) -> fmt::Result {
pub(crate) fn write_hundreds(w: &mut (impl Write + ?Sized), n: u8) -> fmt::Result {
if n >= 100 {
return Err(fmt::Error);
}

View File

@@ -33,6 +33,8 @@
#[cfg(all(feature = "alloc", not(feature = "std"), not(test)))]
use alloc::boxed::Box;
#[cfg(all(feature = "core-error", not(feature = "std")))]
use core::error::Error;
use core::fmt;
use core::str::FromStr;
#[cfg(feature = "std")]
@@ -59,7 +61,7 @@ pub(crate) use formatting::write_hundreds;
#[cfg(feature = "alloc")]
pub(crate) use formatting::write_rfc2822;
#[cfg(any(feature = "alloc", feature = "serde"))]
pub(crate) use formatting::write_rfc3339;
pub use formatting::write_rfc3339;
#[cfg(feature = "alloc")]
#[allow(deprecated)]
pub use formatting::{DelayedFormat, format, format_item};
@@ -450,7 +452,7 @@ impl fmt::Display for ParseError {
}
}
#[cfg(feature = "std")]
#[cfg(any(feature = "core-error", feature = "std"))]
impl Error for ParseError {
#[allow(deprecated)]
fn description(&self) -> &str {

View File

@@ -1878,7 +1878,7 @@ mod tests {
if dt != checkdate {
// check for expected result
panic!(
"Date conversion failed for {date}\nReceived: {dt:?}\nExpected: checkdate{:?}"
"Date conversion failed for {date}\nReceived: {dt:?}\nExpected: {checkdate:?}"
);
}
}

View File

@@ -832,7 +832,7 @@ impl Parsed {
// reconstruct date and time fields from timestamp
let ts = timestamp.checked_add(i64::from(offset)).ok_or(OUT_OF_RANGE)?;
let mut datetime = DateTime::from_timestamp(ts, 0).ok_or(OUT_OF_RANGE)?.naive_utc();
let mut datetime = DateTime::from_timestamp_secs(ts).ok_or(OUT_OF_RANGE)?.naive_utc();
// fill year, ordinal, hour, minute and second fields from timestamp.
// if existing fields are consistent, this will allow the full date/time reconstruction.

View File

@@ -253,8 +253,7 @@ impl<'a> StrftimeItems<'a> {
/// const ITEMS: &[Item<'static>] = &[
/// Item::Numeric(Numeric::Year, Pad::Zero),
/// Item::Literal("-"),
/// Item::Literal("%"),
/// Item::Literal("Q"),
/// Item::Literal("%Q"),
/// ];
/// println!("{:?}", strftime_parser.clone().collect::<Vec<_>>());
/// assert!(strftime_parser.eq(ITEMS.iter().cloned()));
@@ -425,9 +424,247 @@ impl<'a> StrftimeItems<'a> {
})
.collect()
}
}
const HAVE_ALTERNATES: &str = "z";
fn parse_next_item(&mut self, mut remainder: &'a str) -> Option<(&'a str, Item<'a>)> {
use InternalInternal::*;
use Item::{Literal, Space};
use Numeric::*;
let (original, mut remainder) = match remainder.chars().next()? {
// the next item is a specifier
'%' => (remainder, &remainder[1..]),
// the next item is space
c if c.is_whitespace() => {
// `%` is not a whitespace, so `c != '%'` is redundant
let nextspec =
remainder.find(|c: char| !c.is_whitespace()).unwrap_or(remainder.len());
assert!(nextspec > 0);
let item = Space(&remainder[..nextspec]);
remainder = &remainder[nextspec..];
return Some((remainder, item));
}
// the next item is literal
_ => {
let nextspec = remainder
.find(|c: char| c.is_whitespace() || c == '%')
.unwrap_or(remainder.len());
assert!(nextspec > 0);
let item = Literal(&remainder[..nextspec]);
remainder = &remainder[nextspec..];
return Some((remainder, item));
}
};
macro_rules! next {
() => {
match remainder.chars().next() {
Some(x) => {
remainder = &remainder[x.len_utf8()..];
x
}
None => return Some((remainder, self.error(original, remainder))), // premature end of string
}
};
}
let spec = next!();
let pad_override = match spec {
'-' => Some(Pad::None),
'0' => Some(Pad::Zero),
'_' => Some(Pad::Space),
_ => None,
};
let is_alternate = spec == '#';
let spec = if pad_override.is_some() || is_alternate { next!() } else { spec };
if is_alternate && !HAVE_ALTERNATES.contains(spec) {
return Some((remainder, self.error(original, remainder)));
}
macro_rules! queue {
[$head:expr, $($tail:expr),+ $(,)*] => ({
const QUEUE: &'static [Item<'static>] = &[$($tail),+];
self.queue = QUEUE;
$head
})
}
#[cfg(not(feature = "unstable-locales"))]
macro_rules! queue_from_slice {
($slice:expr) => {{
self.queue = &$slice[1..];
$slice[0].clone()
}};
}
let item = match spec {
'A' => fixed(Fixed::LongWeekdayName),
'B' => fixed(Fixed::LongMonthName),
'C' => num0(YearDiv100),
'D' => {
queue![num0(Month), Literal("/"), num0(Day), Literal("/"), num0(YearMod100)]
}
'F' => queue![num0(Year), Literal("-"), num0(Month), Literal("-"), num0(Day)],
'G' => num0(IsoYear),
'H' => num0(Hour),
'I' => num0(Hour12),
'M' => num0(Minute),
'P' => fixed(Fixed::LowerAmPm),
'R' => queue![num0(Hour), Literal(":"), num0(Minute)],
'S' => num0(Second),
'T' => {
queue![num0(Hour), Literal(":"), num0(Minute), Literal(":"), num0(Second)]
}
'U' => num0(WeekFromSun),
'V' => num0(IsoWeek),
'W' => num0(WeekFromMon),
#[cfg(not(feature = "unstable-locales"))]
'X' => queue_from_slice!(T_FMT),
#[cfg(feature = "unstable-locales")]
'X' => self.switch_to_locale_str(locales::t_fmt, T_FMT),
'Y' => num0(Year),
'Z' => fixed(Fixed::TimezoneName),
'a' => fixed(Fixed::ShortWeekdayName),
'b' | 'h' => fixed(Fixed::ShortMonthName),
#[cfg(not(feature = "unstable-locales"))]
'c' => queue_from_slice!(D_T_FMT),
#[cfg(feature = "unstable-locales")]
'c' => self.switch_to_locale_str(locales::d_t_fmt, D_T_FMT),
'd' => num0(Day),
'e' => nums(Day),
'f' => num0(Nanosecond),
'g' => num0(IsoYearMod100),
'j' => num0(Ordinal),
'k' => nums(Hour),
'l' => nums(Hour12),
'm' => num0(Month),
'n' => Space("\n"),
'p' => fixed(Fixed::UpperAmPm),
'q' => num(Quarter),
#[cfg(not(feature = "unstable-locales"))]
'r' => queue_from_slice!(T_FMT_AMPM),
#[cfg(feature = "unstable-locales")]
'r' => {
if self.locale.is_some() && locales::t_fmt_ampm(self.locale.unwrap()).is_empty() {
// 12-hour clock not supported by this locale. Switch to 24-hour format.
self.switch_to_locale_str(locales::t_fmt, T_FMT)
} else {
self.switch_to_locale_str(locales::t_fmt_ampm, T_FMT_AMPM)
}
}
's' => num(Timestamp),
't' => Space("\t"),
'u' => num(WeekdayFromMon),
'v' => {
queue![
nums(Day),
Literal("-"),
fixed(Fixed::ShortMonthName),
Literal("-"),
num0(Year)
]
}
'w' => num(NumDaysFromSun),
#[cfg(not(feature = "unstable-locales"))]
'x' => queue_from_slice!(D_FMT),
#[cfg(feature = "unstable-locales")]
'x' => self.switch_to_locale_str(locales::d_fmt, D_FMT),
'y' => num0(YearMod100),
'z' => {
if is_alternate {
internal_fixed(TimezoneOffsetPermissive)
} else {
fixed(Fixed::TimezoneOffset)
}
}
'+' => fixed(Fixed::RFC3339),
':' => {
if remainder.starts_with("::z") {
remainder = &remainder[3..];
fixed(Fixed::TimezoneOffsetTripleColon)
} else if remainder.starts_with(":z") {
remainder = &remainder[2..];
fixed(Fixed::TimezoneOffsetDoubleColon)
} else if remainder.starts_with('z') {
remainder = &remainder[1..];
fixed(Fixed::TimezoneOffsetColon)
} else {
self.error(original, remainder)
}
}
'.' => match next!() {
'3' => match next!() {
'f' => fixed(Fixed::Nanosecond3),
_ => self.error(original, remainder),
},
'6' => match next!() {
'f' => fixed(Fixed::Nanosecond6),
_ => self.error(original, remainder),
},
'9' => match next!() {
'f' => fixed(Fixed::Nanosecond9),
_ => self.error(original, remainder),
},
'f' => fixed(Fixed::Nanosecond),
_ => self.error(original, remainder),
},
'3' => match next!() {
'f' => internal_fixed(Nanosecond3NoDot),
_ => self.error(original, remainder),
},
'6' => match next!() {
'f' => internal_fixed(Nanosecond6NoDot),
_ => self.error(original, remainder),
},
'9' => match next!() {
'f' => internal_fixed(Nanosecond9NoDot),
_ => self.error(original, remainder),
},
'%' => Literal("%"),
_ => self.error(original, remainder),
};
// Adjust `item` if we have any padding modifier.
// Not allowed on non-numeric items or on specifiers composed out of multiple
// formatting items.
if let Some(new_pad) = pad_override {
match item {
Item::Numeric(ref kind, _pad) if self.queue.is_empty() => {
Some((remainder, Item::Numeric(kind.clone(), new_pad)))
}
_ => Some((remainder, self.error(original, remainder))),
}
} else {
Some((remainder, item))
}
}
fn error<'b>(&mut self, original: &'b str, remainder: &'b str) -> Item<'b> {
match self.lenient {
false => Item::Error,
true => Item::Literal(&original[..original.len() - remainder.len()]),
}
}
#[cfg(feature = "unstable-locales")]
fn switch_to_locale_str(
&mut self,
localized_fmt_str: impl Fn(Locale) -> &'static str,
fallback: &'static [Item<'static>],
) -> Item<'a> {
if let Some(locale) = self.locale {
assert!(self.locale_str.is_empty());
let (fmt_str, item) = self.parse_next_item(localized_fmt_str(locale)).unwrap();
self.locale_str = fmt_str;
item
} else {
self.queue = &fallback[1..];
fallback[0].clone()
}
}
}
impl<'a> Iterator for StrftimeItems<'a> {
type Item = Item<'a>;
@@ -454,330 +691,46 @@ impl<'a> Iterator for StrftimeItems<'a> {
}
}
impl<'a> StrftimeItems<'a> {
fn error<'b>(
&mut self,
original: &'b str,
error_len: &mut usize,
ch: Option<char>,
) -> (&'b str, Item<'b>) {
if !self.lenient {
return (&original[*error_len..], Item::Error);
}
static D_FMT: &[Item<'static>] = &[
num0(Numeric::Month),
Item::Literal("/"),
num0(Numeric::Day),
Item::Literal("/"),
num0(Numeric::YearMod100),
];
static D_T_FMT: &[Item<'static>] = &[
fixed(Fixed::ShortWeekdayName),
Item::Space(" "),
fixed(Fixed::ShortMonthName),
Item::Space(" "),
nums(Numeric::Day),
Item::Space(" "),
num0(Numeric::Hour),
Item::Literal(":"),
num0(Numeric::Minute),
Item::Literal(":"),
num0(Numeric::Second),
Item::Space(" "),
num0(Numeric::Year),
];
static T_FMT: &[Item<'static>] = &[
num0(Numeric::Hour),
Item::Literal(":"),
num0(Numeric::Minute),
Item::Literal(":"),
num0(Numeric::Second),
];
static T_FMT_AMPM: &[Item<'static>] = &[
num0(Numeric::Hour12),
Item::Literal(":"),
num0(Numeric::Minute),
Item::Literal(":"),
num0(Numeric::Second),
Item::Space(" "),
fixed(Fixed::UpperAmPm),
];
if let Some(c) = ch {
*error_len -= c.len_utf8();
}
(&original[*error_len..], Item::Literal(&original[..*error_len]))
}
/// Parses the next formatting item from `remainder`.
///
/// Returns `None` once the input is exhausted, otherwise the remaining
/// unparsed input paired with the parsed `Item`. Specifiers that expand to
/// several items (e.g. `%D`) yield their first item and stash the rest in
/// `self.queue`, to be drained by subsequent calls.
fn parse_next_item(&mut self, mut remainder: &'a str) -> Option<(&'a str, Item<'a>)> {
    use InternalInternal::*;
    use Item::{Literal, Space};
    use Numeric::*;

    // POSIX fallback expansions for the composite specifiers %x, %c, %X and %r.
    static D_FMT: &[Item<'static>] =
        &[num0(Month), Literal("/"), num0(Day), Literal("/"), num0(YearMod100)];
    static D_T_FMT: &[Item<'static>] = &[
        fixed(Fixed::ShortWeekdayName),
        Space(" "),
        fixed(Fixed::ShortMonthName),
        Space(" "),
        nums(Day),
        Space(" "),
        num0(Hour),
        Literal(":"),
        num0(Minute),
        Literal(":"),
        num0(Second),
        Space(" "),
        num0(Year),
    ];
    static T_FMT: &[Item<'static>] =
        &[num0(Hour), Literal(":"), num0(Minute), Literal(":"), num0(Second)];
    static T_FMT_AMPM: &[Item<'static>] = &[
        num0(Hour12),
        Literal(":"),
        num0(Minute),
        Literal(":"),
        num0(Second),
        Space(" "),
        fixed(Fixed::UpperAmPm),
    ];

    match remainder.chars().next() {
        // we are done
        None => None,

        // the next item is a specifier
        Some('%') => {
            let original = remainder;
            remainder = &remainder[1..];

            // Number of bytes of `original` consumed by the current specifier
            // so far; only maintained in lenient mode, where it bounds the
            // literal passed through on error (see `Self::error`).
            let mut error_len = 0;
            if self.lenient {
                error_len += 1;
            }

            // Advances one character, or bails out with an error item on a
            // premature end of the input.
            macro_rules! next {
                () => {
                    match remainder.chars().next() {
                        Some(x) => {
                            remainder = &remainder[x.len_utf8()..];
                            if self.lenient {
                                error_len += x.len_utf8();
                            }
                            x
                        }
                        None => return Some(self.error(original, &mut error_len, None)), // premature end of string
                    }
                };
            }

            let spec = next!();
            // Optional padding modifier: `-` (no padding), `0` (zeros), `_` (spaces).
            let pad_override = match spec {
                '-' => Some(Pad::None),
                '0' => Some(Pad::Zero),
                '_' => Some(Pad::Space),
                _ => None,
            };
            let is_alternate = spec == '#';
            let spec = if pad_override.is_some() || is_alternate { next!() } else { spec };
            // The `%#…` alternate form is only defined for the specifiers
            // listed in `HAVE_ALTERNATES`.
            if is_alternate && !HAVE_ALTERNATES.contains(spec) {
                return Some(self.error(original, &mut error_len, Some(spec)));
            }

            // Emits the first item now and queues the rest for later calls.
            macro_rules! queue {
                [$head:expr, $($tail:expr),+ $(,)*] => ({
                    const QUEUE: &'static [Item<'static>] = &[$($tail),+];
                    self.queue = QUEUE;
                    $head
                })
            }
            // Like `queue!`, but for one of the pre-built static slices above.
            #[cfg(not(feature = "unstable-locales"))]
            macro_rules! queue_from_slice {
                ($slice:expr) => {{
                    self.queue = &$slice[1..];
                    $slice[0].clone()
                }};
            }

            let item = match spec {
                'A' => fixed(Fixed::LongWeekdayName),
                'B' => fixed(Fixed::LongMonthName),
                'C' => num0(YearDiv100),
                'D' => {
                    queue![num0(Month), Literal("/"), num0(Day), Literal("/"), num0(YearMod100)]
                }
                'F' => queue![num0(Year), Literal("-"), num0(Month), Literal("-"), num0(Day)],
                'G' => num0(IsoYear),
                'H' => num0(Hour),
                'I' => num0(Hour12),
                'M' => num0(Minute),
                'P' => fixed(Fixed::LowerAmPm),
                'R' => queue![num0(Hour), Literal(":"), num0(Minute)],
                'S' => num0(Second),
                'T' => {
                    queue![num0(Hour), Literal(":"), num0(Minute), Literal(":"), num0(Second)]
                }
                'U' => num0(WeekFromSun),
                'V' => num0(IsoWeek),
                'W' => num0(WeekFromMon),
                #[cfg(not(feature = "unstable-locales"))]
                'X' => queue_from_slice!(T_FMT),
                #[cfg(feature = "unstable-locales")]
                'X' => self.switch_to_locale_str(locales::t_fmt, T_FMT),
                'Y' => num0(Year),
                'Z' => fixed(Fixed::TimezoneName),
                'a' => fixed(Fixed::ShortWeekdayName),
                'b' | 'h' => fixed(Fixed::ShortMonthName),
                #[cfg(not(feature = "unstable-locales"))]
                'c' => queue_from_slice!(D_T_FMT),
                #[cfg(feature = "unstable-locales")]
                'c' => self.switch_to_locale_str(locales::d_t_fmt, D_T_FMT),
                'd' => num0(Day),
                'e' => nums(Day),
                'f' => num0(Nanosecond),
                'g' => num0(IsoYearMod100),
                'j' => num0(Ordinal),
                'k' => nums(Hour),
                'l' => nums(Hour12),
                'm' => num0(Month),
                'n' => Space("\n"),
                'p' => fixed(Fixed::UpperAmPm),
                'q' => num(Quarter),
                #[cfg(not(feature = "unstable-locales"))]
                'r' => queue_from_slice!(T_FMT_AMPM),
                #[cfg(feature = "unstable-locales")]
                'r' => {
                    if self.locale.is_some()
                        && locales::t_fmt_ampm(self.locale.unwrap()).is_empty()
                    {
                        // 12-hour clock not supported by this locale. Switch to 24-hour format.
                        self.switch_to_locale_str(locales::t_fmt, T_FMT)
                    } else {
                        self.switch_to_locale_str(locales::t_fmt_ampm, T_FMT_AMPM)
                    }
                }
                's' => num(Timestamp),
                't' => Space("\t"),
                'u' => num(WeekdayFromMon),
                'v' => {
                    queue![
                        nums(Day),
                        Literal("-"),
                        fixed(Fixed::ShortMonthName),
                        Literal("-"),
                        num0(Year)
                    ]
                }
                'w' => num(NumDaysFromSun),
                #[cfg(not(feature = "unstable-locales"))]
                'x' => queue_from_slice!(D_FMT),
                #[cfg(feature = "unstable-locales")]
                'x' => self.switch_to_locale_str(locales::d_fmt, D_FMT),
                'y' => num0(YearMod100),
                'z' => {
                    if is_alternate {
                        internal_fixed(TimezoneOffsetPermissive)
                    } else {
                        fixed(Fixed::TimezoneOffset)
                    }
                }
                '+' => fixed(Fixed::RFC3339),
                ':' => {
                    // `%:z`, `%::z` and `%:::z` select increasingly verbose
                    // UTC-offset forms; anything else after `:` is an error.
                    if remainder.starts_with("::z") {
                        remainder = &remainder[3..];
                        fixed(Fixed::TimezoneOffsetTripleColon)
                    } else if remainder.starts_with(":z") {
                        remainder = &remainder[2..];
                        fixed(Fixed::TimezoneOffsetDoubleColon)
                    } else if remainder.starts_with('z') {
                        remainder = &remainder[1..];
                        fixed(Fixed::TimezoneOffsetColon)
                    } else {
                        self.error(original, &mut error_len, None).1
                    }
                }
                // `%.3f` / `%.6f` / `%.9f` / `%.f`: fractional seconds with a dot.
                '.' => match next!() {
                    '3' => match next!() {
                        'f' => fixed(Fixed::Nanosecond3),
                        c => {
                            let res = self.error(original, &mut error_len, Some(c));
                            remainder = res.0;
                            res.1
                        }
                    },
                    '6' => match next!() {
                        'f' => fixed(Fixed::Nanosecond6),
                        c => {
                            let res = self.error(original, &mut error_len, Some(c));
                            remainder = res.0;
                            res.1
                        }
                    },
                    '9' => match next!() {
                        'f' => fixed(Fixed::Nanosecond9),
                        c => {
                            let res = self.error(original, &mut error_len, Some(c));
                            remainder = res.0;
                            res.1
                        }
                    },
                    'f' => fixed(Fixed::Nanosecond),
                    c => {
                        let res = self.error(original, &mut error_len, Some(c));
                        remainder = res.0;
                        res.1
                    }
                },
                // `%3f` / `%6f` / `%9f`: fractional seconds without a leading dot.
                '3' => match next!() {
                    'f' => internal_fixed(Nanosecond3NoDot),
                    c => {
                        let res = self.error(original, &mut error_len, Some(c));
                        remainder = res.0;
                        res.1
                    }
                },
                '6' => match next!() {
                    'f' => internal_fixed(Nanosecond6NoDot),
                    c => {
                        let res = self.error(original, &mut error_len, Some(c));
                        remainder = res.0;
                        res.1
                    }
                },
                '9' => match next!() {
                    'f' => internal_fixed(Nanosecond9NoDot),
                    c => {
                        let res = self.error(original, &mut error_len, Some(c));
                        remainder = res.0;
                        res.1
                    }
                },
                '%' => Literal("%"),
                c => {
                    let res = self.error(original, &mut error_len, Some(c));
                    remainder = res.0;
                    res.1
                }
            };

            // Adjust `item` if we have any padding modifier.
            // Not allowed on non-numeric items or on specifiers composed out of multiple
            // formatting items.
            if let Some(new_pad) = pad_override {
                match item {
                    Item::Numeric(ref kind, _pad) if self.queue.is_empty() => {
                        Some((remainder, Item::Numeric(kind.clone(), new_pad)))
                    }
                    _ => Some(self.error(original, &mut error_len, None)),
                }
            } else {
                Some((remainder, item))
            }
        }

        // the next item is space
        Some(c) if c.is_whitespace() => {
            // `%` is not a whitespace, so `c != '%'` is redundant
            let nextspec =
                remainder.find(|c: char| !c.is_whitespace()).unwrap_or(remainder.len());
            assert!(nextspec > 0);
            let item = Space(&remainder[..nextspec]);
            remainder = &remainder[nextspec..];
            Some((remainder, item))
        }

        // the next item is literal
        _ => {
            let nextspec = remainder
                .find(|c: char| c.is_whitespace() || c == '%')
                .unwrap_or(remainder.len());
            assert!(nextspec > 0);
            let item = Literal(&remainder[..nextspec]);
            remainder = &remainder[nextspec..];
            Some((remainder, item))
        }
    }
}
/// Redirects parsing to a locale-specific format string for a composite
/// specifier.
///
/// With a locale set, the first item of the localized format string is
/// returned and the unparsed rest is stashed in `self.locale_str`; without
/// one, the fallback slice's head is returned and its tail is queued.
#[cfg(feature = "unstable-locales")]
fn switch_to_locale_str(
    &mut self,
    localized_fmt_str: impl Fn(Locale) -> &'static str,
    fallback: &'static [Item<'static>],
) -> Item<'a> {
    if let Some(locale) = self.locale {
        // Any previously stashed locale string must be fully consumed.
        assert!(self.locale_str.is_empty());
        let (fmt_str, item) = self.parse_next_item(localized_fmt_str(locale)).unwrap();
        self.locale_str = fmt_str;
        item
    } else {
        // No locale: emit the fallback's head now and queue its tail.
        self.queue = &fallback[1..];
        fallback[0].clone()
    }
}
}
/// Specifiers that support the `%#…` "alternate" modifier (currently only
/// `%#z`, the permissive time-zone offset).
const HAVE_ALTERNATES: &str = "z";
#[cfg(test)]
mod tests {
@@ -1246,4 +1199,18 @@ mod tests {
"2014-05-07T12:34:56+0000%Q%.2f%%"
);
}
/// Regression test for https://github.com/chronotope/chrono/issues/1725
///
/// Iterating over an invalid specifier (`%2f`) must terminate.
#[test]
#[cfg(any(feature = "alloc", feature = "std"))]
fn test_finite() {
    let mut i = 0;
    for item in StrftimeItems::new("%2f") {
        println!("{:?}", item);
        i += 1;
        // A three-byte format string can only yield a handful of items; far
        // more than that means the iterator is stuck.
        if i > 10 {
            panic!("infinite loop");
        }
    }
}
}

View File

@@ -380,7 +380,7 @@
//! use chrono::{DateTime, Utc};
//!
//! // Construct a datetime from epoch:
//! let dt: DateTime<Utc> = DateTime::from_timestamp(1_500_000_000, 0).unwrap();
//! let dt: DateTime<Utc> = DateTime::from_timestamp_secs(1_500_000_000).unwrap();
//! assert_eq!(dt.to_rfc2822(), "Fri, 14 Jul 2017 02:40:00 +0000");
//!
//! // Get epoch value from a datetime:
@@ -512,8 +512,8 @@
extern crate alloc;
mod time_delta;
#[cfg(feature = "std")]
#[doc(no_inline)]
#[cfg(any(feature = "std", feature = "core-error"))]
pub use time_delta::OutOfRangeError;
pub use time_delta::TimeDelta;
@@ -644,7 +644,7 @@ pub mod serde {
/// Zero-copy serialization/deserialization with rkyv.
///
/// This module re-exports the `Archived*` versions of chrono's types.
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
pub mod rkyv {
pub use crate::datetime::ArchivedDateTime;
pub use crate::month::ArchivedMonth;
@@ -690,6 +690,9 @@ impl fmt::Debug for OutOfRange {
#[cfg(feature = "std")]
impl std::error::Error for OutOfRange {}
#[cfg(all(not(feature = "std"), feature = "core-error"))]
impl core::error::Error for OutOfRange {}
/// Workaround because `?` is not (yet) available in const context.
#[macro_export]
#[doc(hidden)]

View File

@@ -1,6 +1,6 @@
use core::fmt;
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
use crate::OutOfRange;
@@ -31,7 +31,7 @@ use crate::naive::NaiveDate;
// Actual implementation is zero-indexed, API intended as 1-indexed for more intuitive behavior.
#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash, PartialOrd, Ord)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))
@@ -272,6 +272,9 @@ pub struct ParseMonthError {
#[cfg(feature = "std")]
impl std::error::Error for ParseMonthError {}
#[cfg(all(not(feature = "std"), feature = "core-error"))]
impl core::error::Error for ParseMonthError {}
impl fmt::Display for ParseMonthError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ParseMonthError {{ .. }}")

View File

@@ -20,13 +20,15 @@ use core::num::NonZeroI32;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::{fmt, str};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
/// L10n locales.
#[cfg(all(feature = "unstable-locales", feature = "alloc"))]
use pure_rust_locales::Locale;
use super::internals::{Mdf, YearFlags};
use crate::datetime::UNIX_EPOCH_DAY;
#[cfg(feature = "alloc")]
use crate::format::DelayedFormat;
use crate::format::{
@@ -38,8 +40,6 @@ use crate::naive::{Days, IsoWeek, NaiveDateTime, NaiveTime, NaiveWeek};
use crate::{Datelike, TimeDelta, Weekday};
use crate::{expect, try_opt};
use super::internals::{Mdf, YearFlags};
#[cfg(test)]
mod tests;
@@ -93,7 +93,7 @@ mod tests;
/// [proleptic Gregorian date]: crate::NaiveDate#calendar-date
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord, Copy, Clone)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))
@@ -384,6 +384,35 @@ impl NaiveDate {
NaiveDate::from_ordinal_and_flags(year_div_400 * 400 + year_mod_400 as i32, ordinal, flags)
}
/// Makes a new `NaiveDate` from a day's number in the proleptic Gregorian calendar, with
/// January 1, 1970 being day 0.
///
/// # Errors
///
/// Returns `None` if the date is out of range.
///
/// # Example
///
/// ```
/// use chrono::NaiveDate;
///
/// let from_ndays_opt = NaiveDate::from_epoch_days;
/// let from_ymd = |y, m, d| NaiveDate::from_ymd_opt(y, m, d).unwrap();
///
/// assert_eq!(from_ndays_opt(-719_162), Some(from_ymd(1, 1, 1)));
/// assert_eq!(from_ndays_opt(1), Some(from_ymd(1970, 1, 2)));
/// assert_eq!(from_ndays_opt(0), Some(from_ymd(1970, 1, 1)));
/// assert_eq!(from_ndays_opt(-1), Some(from_ymd(1969, 12, 31)));
/// assert_eq!(from_ndays_opt(13036), Some(from_ymd(2005, 9, 10)));
/// assert_eq!(from_ndays_opt(100_000_000), None);
/// assert_eq!(from_ndays_opt(-100_000_000), None);
/// ```
#[must_use]
pub const fn from_epoch_days(days: i32) -> Option<NaiveDate> {
let ce_days = try_opt!(days.checked_add(UNIX_EPOCH_DAY as i32));
NaiveDate::from_num_days_from_ce_opt(ce_days)
}
/// Makes a new `NaiveDate` by counting the number of occurrences of a particular day-of-week
/// since the beginning of the given month. For instance, if you want the 2nd Friday of March
/// 2017, you would use `NaiveDate::from_weekday_of_month(2017, 3, Weekday::Fri, 2)`.
@@ -1407,6 +1436,23 @@ impl NaiveDate {
ndays + self.ordinal() as i32
}
/// Counts the days in the proleptic Gregorian calendar, with January 1, 1970 as day 0.
///
/// This is the inverse of [`NaiveDate::from_epoch_days`].
///
/// # Example
///
/// ```
/// use chrono::NaiveDate;
///
/// let from_ymd = |y, m, d| NaiveDate::from_ymd_opt(y, m, d).unwrap();
///
/// assert_eq!(from_ymd(1, 1, 1).to_epoch_days(), -719162);
/// assert_eq!(from_ymd(1970, 1, 1).to_epoch_days(), 0);
/// assert_eq!(from_ymd(2005, 9, 10).to_epoch_days(), 13036);
/// ```
// `#[must_use]` added for consistency with `from_epoch_days`: this is a pure
// accessor whose result is a bug to discard.
#[must_use]
pub const fn to_epoch_days(&self) -> i32 {
    // `num_days_from_ce` counts from January 1 of year 1; shift so the Unix
    // epoch (1970-01-01) becomes day 0.
    self.num_days_from_ce() - UNIX_EPOCH_DAY as i32
}
/// Create a new `NaiveDate` from a raw year-ordinal-flags `i32`.
///
/// In a valid value an ordinal is never `0`, and neither are the year flags. This method

View File

@@ -301,6 +301,39 @@ fn test_date_from_num_days_from_ce() {
assert_eq!(from_ndays_from_ce(i32::MAX), None);
}
#[test]
fn test_date_from_epoch_days() {
    let from_epoch_days = NaiveDate::from_epoch_days;
    // Spot checks: CE year 1, the epoch itself, and nearby days.
    assert_eq!(from_epoch_days(-719_162), Some(NaiveDate::from_ymd_opt(1, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(0), Some(NaiveDate::from_ymd_opt(1970, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(1), Some(NaiveDate::from_ymd_opt(1970, 1, 2).unwrap()));
    assert_eq!(from_epoch_days(2), Some(NaiveDate::from_ymd_opt(1970, 1, 3).unwrap()));
    // Month boundaries within 1970 (a non-leap year).
    assert_eq!(from_epoch_days(30), Some(NaiveDate::from_ymd_opt(1970, 1, 31).unwrap()));
    assert_eq!(from_epoch_days(31), Some(NaiveDate::from_ymd_opt(1970, 2, 1).unwrap()));
    assert_eq!(from_epoch_days(58), Some(NaiveDate::from_ymd_opt(1970, 2, 28).unwrap()));
    assert_eq!(from_epoch_days(59), Some(NaiveDate::from_ymd_opt(1970, 3, 1).unwrap()));
    // Year boundaries, including the 1972 leap year's extra day.
    assert_eq!(from_epoch_days(364), Some(NaiveDate::from_ymd_opt(1970, 12, 31).unwrap()));
    assert_eq!(from_epoch_days(365), Some(NaiveDate::from_ymd_opt(1971, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(365 * 2), Some(NaiveDate::from_ymd_opt(1972, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(365 * 3 + 1), Some(NaiveDate::from_ymd_opt(1973, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(365 * 4 + 1), Some(NaiveDate::from_ymd_opt(1974, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(13036), Some(NaiveDate::from_ymd_opt(2005, 9, 10).unwrap()));
    // Negative day numbers reach before the epoch.
    assert_eq!(from_epoch_days(-365), Some(NaiveDate::from_ymd_opt(1969, 1, 1).unwrap()));
    assert_eq!(from_epoch_days(-366), Some(NaiveDate::from_ymd_opt(1968, 12, 31).unwrap()));
    // Round-trip with `to_epoch_days` over a wide sampled range.
    for days in (-9999..10001).map(|x| x * 100) {
        assert_eq!(from_epoch_days(days).map(|d| d.to_epoch_days()), Some(days));
    }
    // Exact range limits: the extreme representable dates succeed, one day
    // beyond each fails, as do the `i32` extremes.
    assert_eq!(from_epoch_days(NaiveDate::MIN.to_epoch_days()), Some(NaiveDate::MIN));
    assert_eq!(from_epoch_days(NaiveDate::MIN.to_epoch_days() - 1), None);
    assert_eq!(from_epoch_days(NaiveDate::MAX.to_epoch_days()), Some(NaiveDate::MAX));
    assert_eq!(from_epoch_days(NaiveDate::MAX.to_epoch_days() + 1), None);
    assert_eq!(from_epoch_days(i32::MIN), None);
    assert_eq!(from_epoch_days(i32::MAX), None);
}
#[test]
fn test_date_from_weekday_of_month_opt() {
let ymwd = NaiveDate::from_weekday_of_month_opt;
@@ -423,6 +456,18 @@ fn test_date_num_days_from_ce() {
}
}
#[test]
fn test_date_to_epoch_days() {
    // The Unix epoch is day 0 by definition.
    assert_eq!(NaiveDate::from_ymd_opt(1970, 1, 1).unwrap().to_epoch_days(), 0);
    // Day numbers must be continuous across every year boundary in range:
    // January 1 is always exactly one day after the previous December 31.
    for year in -9999..10001 {
        assert_eq!(
            NaiveDate::from_ymd_opt(year, 1, 1).unwrap().to_epoch_days(),
            NaiveDate::from_ymd_opt(year - 1, 12, 31).unwrap().to_epoch_days() + 1
        );
    }
}
#[test]
fn test_date_succ() {
let ymd = |y, m, d| NaiveDate::from_ymd_opt(y, m, d).unwrap();

View File

@@ -10,7 +10,7 @@ use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::time::Duration;
use core::{fmt, str};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
#[cfg(feature = "alloc")]
@@ -66,7 +66,7 @@ pub const MAX_DATETIME: NaiveDateTime = NaiveDateTime::MAX;
/// ```
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord, Copy, Clone)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))

View File

@@ -955,7 +955,7 @@ pub mod ts_seconds {
/// }
///
/// let my_s: S = serde_json::from_str(r#"{ "time": 1431684000 }"#)?;
/// let expected = DateTime::from_timestamp(1431684000, 0).unwrap().naive_utc();
/// let expected = DateTime::from_timestamp_secs(1431684000).unwrap().naive_utc();
/// assert_eq!(my_s, S { time: expected });
/// # Ok::<(), serde_json::Error>(())
/// ```
@@ -979,7 +979,7 @@ pub mod ts_seconds {
where
E: de::Error,
{
DateTime::from_timestamp(value, 0)
DateTime::from_timestamp_secs(value)
.map(|dt| dt.naive_utc())
.ok_or_else(|| invalid_ts(value))
}
@@ -991,7 +991,7 @@ pub mod ts_seconds {
if value > i64::MAX as u64 {
Err(invalid_ts(value))
} else {
DateTime::from_timestamp(value as i64, 0)
DateTime::from_timestamp_secs(value as i64)
.map(|dt| dt.naive_utc())
.ok_or_else(|| invalid_ts(value))
}
@@ -1080,7 +1080,7 @@ pub mod ts_seconds_option {
/// }
///
/// let my_s: S = serde_json::from_str(r#"{ "time": 1431684000 }"#)?;
/// let expected = DateTime::from_timestamp(1431684000, 0).unwrap().naive_utc();
/// let expected = DateTime::from_timestamp_secs(1431684000).unwrap().naive_utc();
/// assert_eq!(my_s, S { time: Some(expected) });
/// # Ok::<(), serde_json::Error>(())
/// ```

View File

@@ -7,7 +7,7 @@ use core::fmt;
use super::internals::YearFlags;
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
/// ISO 8601 week.
@@ -18,7 +18,7 @@ use rkyv::{Archive, Deserialize, Serialize};
/// via the [`Datelike::iso_week`](../trait.Datelike.html#tymethod.iso_week) method.
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))
@@ -176,13 +176,13 @@ mod tests {
assert_eq!(minweek.week(), 1);
assert_eq!(minweek.week0(), 0);
#[cfg(feature = "alloc")]
assert_eq!(format!("{:?}", minweek), NaiveDate::MIN.format("%G-W%V").to_string());
assert_eq!(format!("{minweek:?}"), NaiveDate::MIN.format("%G-W%V").to_string());
assert_eq!(maxweek.year(), date::MAX_YEAR + 1);
assert_eq!(maxweek.week(), 1);
assert_eq!(maxweek.week0(), 0);
#[cfg(feature = "alloc")]
assert_eq!(format!("{:?}", maxweek), NaiveDate::MAX.format("%G-W%V").to_string());
assert_eq!(format!("{maxweek:?}"), NaiveDate::MAX.format("%G-W%V").to_string());
}
#[test]

View File

@@ -9,7 +9,7 @@ use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::time::Duration;
use core::{fmt, str};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
#[cfg(feature = "alloc")]
@@ -211,7 +211,7 @@ mod tests;
/// **there is absolutely no guarantee that the leap second read has actually happened**.
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord, Copy, Clone)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))

View File

@@ -283,26 +283,23 @@ fn test_time_from_str() {
"23:59:60.373929310237",
];
for &s in &valid {
eprintln!("test_time_parse_from_str valid {:?}", s);
eprintln!("test_time_parse_from_str valid {s:?}");
let d = match s.parse::<NaiveTime>() {
Ok(d) => d,
Err(e) => panic!("parsing `{}` has failed: {}", s, e),
Err(e) => panic!("parsing `{s}` has failed: {e}"),
};
let s_ = format!("{:?}", d);
let s_ = format!("{d:?}");
// `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same
let d_ = match s_.parse::<NaiveTime>() {
Ok(d) => d,
Err(e) => {
panic!("`{}` is parsed into `{:?}`, but reparsing that has failed: {}", s, d, e)
panic!("`{s}` is parsed into `{d:?}`, but reparsing that has failed: {e}")
}
};
assert!(
d == d_,
"`{}` is parsed into `{:?}`, but reparsed result \
`{:?}` does not match",
s,
d,
d_
"`{s}` is parsed into `{d:?}`, but reparsed result \
`{d_:?}` does not match"
);
}
@@ -329,7 +326,7 @@ fn test_time_from_str() {
"09:08:00000000007", // invalid second / invalid fraction format
];
for &s in &invalid {
eprintln!("test_time_parse_from_str invalid {:?}", s);
eprintln!("test_time_parse_from_str invalid {s:?}");
assert!(s.parse::<NaiveTime>().is_err());
}
}

View File

@@ -6,7 +6,7 @@
use core::fmt;
use core::str::FromStr;
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
use super::{MappedLocalTime, Offset, TimeZone};
@@ -21,7 +21,7 @@ use crate::naive::{NaiveDate, NaiveDateTime};
/// [`west_opt`](#method.west_opt) methods for examples.
#[derive(PartialEq, Eq, Hash, Copy, Clone)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, Hash, Debug)))

View File

@@ -6,7 +6,7 @@
#[cfg(windows)]
use std::cmp::Ordering;
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
use super::fixed::FixedOffset;
@@ -115,10 +115,10 @@ mod tz_info;
/// ```
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq)),
rkyv(attr(derive(Clone, Copy, Debug)))
archive(compare(PartialEq)),
archive_attr(derive(Clone, Copy, Debug))
)]
#[cfg_attr(feature = "rkyv-validation", archive(check_bytes))]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
@@ -219,7 +219,7 @@ impl Transition {
#[cfg(windows)]
impl PartialOrd for Transition {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.transition_utc.cmp(&other.transition_utc))
Some(self.cmp(other))
}
}
@@ -343,8 +343,7 @@ mod tests {
// but there are only two sensible options.
assert!(
timestr == "15:02:60" || timestr == "15:03:00",
"unexpected timestr {:?}",
timestr
"unexpected timestr {timestr:?}"
);
}
@@ -352,8 +351,7 @@ mod tests {
let timestr = dt.time().to_string();
assert!(
timestr == "15:02:03.234" || timestr == "15:02:04.234",
"unexpected timestr {:?}",
timestr
"unexpected timestr {timestr:?}"
);
}
}

View File

@@ -134,7 +134,7 @@ impl TimeZone {
}
/// Returns a reference to the time zone
fn as_ref(&'_ self) -> TimeZoneRef<'_> {
fn as_ref(&self) -> TimeZoneRef<'_> {
TimeZoneRef {
transitions: &self.transitions,
local_time_types: &self.local_time_types,

View File

@@ -673,7 +673,7 @@ mod tests {
MappedLocalTime::Single(dt) => {
assert_eq!(dt.to_string(), *expected);
}
e => panic!("Got {:?} instead of an okay answer", e),
e => panic!("Got {e:?} instead of an okay answer"),
}
}
}

View File

@@ -14,7 +14,7 @@ use core::fmt;
))]
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
use super::{FixedOffset, MappedLocalTime, Offset, TimeZone};
@@ -42,7 +42,7 @@ use crate::{Date, DateTime};
/// ```
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, Debug, Hash)))

View File

@@ -109,7 +109,11 @@ pub trait DurationRound: Sized {
type Err: std::error::Error;
/// Error that can occur in rounding or truncating
#[cfg(not(feature = "std"))]
#[cfg(all(not(feature = "std"), feature = "core-error"))]
type Err: core::error::Error;
/// Error that can occur in rounding or truncating
#[cfg(all(not(feature = "std"), not(feature = "core-error")))]
type Err: fmt::Debug + fmt::Display;
/// Return a copy rounded by TimeDelta.
@@ -362,6 +366,14 @@ impl std::error::Error for RoundingError {
}
}
#[cfg(all(not(feature = "std"), feature = "core-error"))]
impl core::error::Error for RoundingError {
#[allow(deprecated)]
fn description(&self) -> &str {
"error from rounding or truncating with DurationRound"
}
}
#[cfg(test)]
mod tests {
use super::{DurationRound, RoundingError, SubsecRound, TimeDelta};

View File

@@ -10,6 +10,8 @@
//! Temporal quantification
#[cfg(all(not(feature = "std"), feature = "core-error"))]
use core::error::Error;
use core::fmt;
use core::ops::{Add, AddAssign, Div, Mul, Neg, Sub, SubAssign};
use core::time::Duration;
@@ -18,7 +20,7 @@ use std::error::Error;
use crate::{expect, try_opt};
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
/// The number of nanoseconds in a microsecond.
@@ -51,7 +53,7 @@ const SECS_PER_WEEK: i64 = 604_800;
/// instance `abs()` can be called without any checks.
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq, PartialOrd)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)))
@@ -630,7 +632,7 @@ impl fmt::Display for OutOfRangeError {
}
}
#[cfg(feature = "std")]
#[cfg(any(feature = "std", feature = "core-error"))]
impl Error for OutOfRangeError {
#[allow(deprecated)]
fn description(&self) -> &str {

View File

@@ -366,7 +366,7 @@ mod tests {
///
/// Panics if `div` is not positive.
fn in_between(start: i32, end: i32, div: i32) -> i32 {
assert!(div > 0, "in_between: nonpositive div = {}", div);
assert!(div > 0, "in_between: nonpositive div = {div}");
let start = (start.div_euclid(div), start.rem_euclid(div));
let end = (end.div_euclid(div), end.rem_euclid(div));
// The lowest multiple of `div` greater than or equal to `start`, divided.
@@ -390,16 +390,10 @@ mod tests {
assert_eq!(
jan1_year.num_days_from_ce(),
num_days_from_ce(&jan1_year),
"on {:?}",
jan1_year
"on {jan1_year:?}"
);
let mid_year = jan1_year + Days::new(133);
assert_eq!(
mid_year.num_days_from_ce(),
num_days_from_ce(&mid_year),
"on {:?}",
mid_year
);
assert_eq!(mid_year.num_days_from_ce(), num_days_from_ce(&mid_year), "on {mid_year:?}");
}
}

View File

@@ -1,6 +1,6 @@
use core::fmt;
#[cfg(any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
#[cfg(any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"))]
use rkyv::{Archive, Deserialize, Serialize};
use crate::OutOfRange;
@@ -31,7 +31,7 @@ use crate::OutOfRange;
/// ```
#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
#[cfg_attr(
any(feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
any(feature = "rkyv", feature = "rkyv-16", feature = "rkyv-32", feature = "rkyv-64"),
derive(Archive, Deserialize, Serialize),
rkyv(compare(PartialEq)),
rkyv(attr(derive(Clone, Copy, PartialEq, Eq, Debug, Hash)))
@@ -238,6 +238,9 @@ pub struct ParseWeekdayError {
pub(crate) _dummy: (),
}
#[cfg(all(not(feature = "std"), feature = "core-error"))]
impl core::error::Error for ParseWeekdayError {}
#[cfg(feature = "std")]
impl std::error::Error for ParseWeekdayError {}

View File

@@ -11,10 +11,11 @@ use crate::parse;
pub struct LoadResult {
/// Number of successfully loaded variables
pub loaded: usize,
/// Number of variables that were skipped (for `load` method only)
pub skipped: usize,
/// Number of variables that were overridden (for `load_override` method only)
pub overridden: usize,
// /// Number of variables that were skipped (for `load` method only)
// pub skipped: usize,
// /// Number of variables that were overridden (for `load_override` method only)
// pub overridden: usize,
pub skipped_or_overridden: usize,
}
pub struct Iter<R> {
@@ -57,8 +58,7 @@ impl<R: Read> Iter<R> {
Ok(LoadResult {
loaded,
skipped,
overridden: 0,
skipped_or_overridden: skipped,
})
}
@@ -86,8 +86,7 @@ impl<R: Read> Iter<R> {
Ok(LoadResult {
loaded,
skipped: 0,
overridden,
skipped_or_overridden: overridden,
})
}

10
patch/macros/Cargo.toml Normal file
View File

@@ -0,0 +1,10 @@
[package]
name = "macros"
version = "0.1.0"
edition = "2024"
authors = ["wisdgod <nav@wisdgod.com>"]
license = "MIT OR Apache-2.0"
description = "Utility macros: batch typed-constant definitions and unchecked-operation shorthands"
repository = "https://github.com/wisdgod/ppp"
[dependencies]

105
patch/macros/src/lib.rs Normal file
View File

@@ -0,0 +1,105 @@
/// Batch define constants of the same type with shared attributes.
///
/// Each `$ty => { ... }` group defines constants of one type; attributes
/// written before the group are applied to every constant in it, and each
/// constant may additionally carry its own attributes.
///
/// Note: the entry rule requires a comma after the first constant of each
/// group; only the last constant of a group may omit its trailing comma.
///
/// # Examples
///
/// ```
/// define_typed_constants! {
///     pub u32 => {
///         MAX_CONNECTIONS = 1024,
///         DEFAULT_TIMEOUT = 30,
///         MIN_BUFFER_SIZE = 256,
///     }
///
///     #[allow(dead_code)]
///     &'static str => {
///         APP_NAME = "server",
///         VERSION = "1.0.0",
///     }
/// }
/// ```
#[macro_export]
macro_rules! define_typed_constants {
    // Entry point: process type group with first constant
    (
        $(#[$group_attr:meta])*
        $vis:vis $ty:ty => {
            $(#[$attr:meta])*
            $name:ident = $value:expr,
            $($inner_rest:tt)*
        }
        $($rest:tt)*
    ) => {
        $(#[$attr])*
        $(#[$group_attr])*
        $vis const $name: $ty = $value;
        // Recurse over the remaining constants of this group...
        $crate::define_typed_constants! {
            @same_type
            $(#[$group_attr])*
            $vis $ty => {
                $($inner_rest)*
            }
        }
        // ...and then over the remaining groups.
        $crate::define_typed_constants! {
            $($rest)*
        }
    };
    // Process remaining constants of the same type
    (
        @same_type
        $(#[$group_attr:meta])*
        $vis:vis $ty:ty => {
            $(#[$attr:meta])*
            $name:ident = $value:expr,
            $($rest:tt)*
        }
    ) => {
        $(#[$attr])*
        $(#[$group_attr])*
        $vis const $name: $ty = $value;
        $crate::define_typed_constants! {
            @same_type
            $(#[$group_attr])*
            $vis $ty => {
                $($rest)*
            }
        }
    };
    // Last constant in type group (no trailing comma)
    (
        @same_type
        $(#[$group_attr:meta])*
        $vis:vis $ty:ty => {
            $(#[$attr:meta])*
            $name:ident = $value:expr
        }
    ) => {
        $(#[$attr])*
        $(#[$group_attr])*
        $vis const $name: $ty = $value;
    };
    // Empty type group
    (@same_type $(#[$group_attr:meta])* $vis:vis $ty:ty => {}) => {};
    // Terminal case
    () => {};
}
/// Transmute `$x` to the inferred target type without the compile-time
/// equal-size check performed by `core::mem::transmute`.
///
/// NOTE(review): this expands to `core::intrinsics::transmute_unchecked`,
/// a perma-unstable intrinsic — every crate that expands this macro must
/// build on nightly with `#![feature(core_intrinsics)]` enabled.
///
/// # Safety
/// The caller must guarantee that the source value's bit pattern is valid
/// for the target type and that the layouts are compatible; unlike
/// `mem::transmute`, the sizes are NOT checked at compile time.
#[macro_export]
macro_rules! transmute_unchecked {
    ($x:expr) => {
        unsafe { ::core::intrinsics::transmute_unchecked($x) }
    };
}
/// Call `unwrap_unchecked` on an `Option` or `Result` without spelling out
/// the `unsafe` block at the call site.
///
/// # Safety
/// The wrapped value must be `Some`/`Ok`; otherwise behavior is undefined.
#[macro_export]
macro_rules! unwrap_unchecked {
    ($value:expr) => {
        unsafe { ($value).unwrap_unchecked() }
    };
}

View File

@@ -0,0 +1,87 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.71.1"
name = "prost"
version = "0.14.1"
authors = [
"Dan Burkert <dan@danburkert.com>",
"Lucio Franco <luciofranco14@gmail.com>",
"Casper Meijn <casper@meijn.net>",
"Tokio Contributors <team@tokio.rs>",
]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "A Protocol Buffers implementation for the Rust Language."
readme = "README.md"
keywords = [
"protobuf",
"serialization",
]
categories = ["encoding"]
license = "Apache-2.0"
repository = "https://github.com/tokio-rs/prost"
[features]
default = [
"derive",
"std",
]
derive = ["dep:prost-derive"]
no-recursion-limit = []
std = []
indexmap = ["dep:indexmap"]
[lib]
name = "prost"
path = "src/lib.rs"
bench = false
[dependencies.bytes]
version = "1"
default-features = false
[dependencies.prost-derive]
path = "../prost-derive"
optional = true
[dependencies.macros]
path = "../macros"
[dependencies.indexmap]
version = "2"
optional = true
[dependencies.cfg-if]
version = "1.0"
[dependencies.any_all_workaround]
version = "0.1"
[dependencies.serde]
version = "1"
default-features = false
[dev-dependencies.criterion]
version = "0.7"
default-features = false
[dev-dependencies.proptest]
version = "1"
[dev-dependencies.rand]
version = "0.9"

35
patch/prost-0.14.1/Cargo.toml.orig generated Normal file
View File

@@ -0,0 +1,35 @@
[package]
name = "prost"
readme = "README.md"
description = "A Protocol Buffers implementation for the Rust Language."
keywords = ["protobuf", "serialization"]
categories = ["encoding"]
version.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true
[lib]
# https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
bench = false
[features]
default = ["derive", "std"]
derive = ["dep:prost-derive"]
no-recursion-limit = []
std = []
[dependencies]
bytes = { version = "1", default-features = false }
prost-derive = { version = "0.14.1", path = "../prost-derive", optional = true }
[dev-dependencies]
criterion = { version = "0.7", default-features = false }
proptest = "1"
rand = "0.9"
[[bench]]
name = "varint"
harness = false

201
patch/prost-0.14.1/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,507 @@
[![continuous integration](https://github.com/tokio-rs/prost/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/tokio-rs/prost/actions/workflows/ci.yml?query=branch%3Amaster)
[![Documentation](https://docs.rs/prost/badge.svg)](https://docs.rs/prost/)
[![Crate](https://img.shields.io/crates/v/prost.svg)](https://crates.io/crates/prost)
[![Dependency Status](https://deps.rs/repo/github/tokio-rs/prost/status.svg)](https://deps.rs/repo/github/tokio-rs/prost)
[![Discord](https://img.shields.io/discord/500028886025895936)](https://discord.gg/tokio)
# *PROST!*
`prost` is a [Protocol Buffers](https://developers.google.com/protocol-buffers/)
implementation for the [Rust Language](https://www.rust-lang.org/). `prost`
generates simple, idiomatic Rust code from `proto2` and `proto3` files.
Compared to other Protocol Buffers implementations, `prost`
* Generates simple, idiomatic, and readable Rust types by taking advantage of
Rust `derive` attributes.
* Retains comments from `.proto` files in generated Rust code.
* Allows existing Rust types (not generated from a `.proto`) to be serialized
and deserialized by adding attributes.
* Uses the [`bytes::{Buf, BufMut}`](https://github.com/carllerche/bytes)
abstractions for serialization instead of `std::io::{Read, Write}`.
* Respects the Protobuf `package` specifier when organizing generated code
into Rust modules.
* Preserves unknown enum values during deserialization.
* Does not include support for runtime reflection or message descriptors.
## Using `prost` in a Cargo Project
First, add `prost` and its public dependencies to your `Cargo.toml`:
```ignore
[dependencies]
prost = "0.14"
# Only necessary if using Protobuf well-known types:
prost-types = "0.14"
```
The recommended way to add `.proto` compilation to a Cargo project is to use the
`prost-build` library. See the [`prost-build` documentation][prost-build] for
more details and examples.
See the [snazzy repository][snazzy] for a simple start-to-finish example.
[prost-build]: https://docs.rs/prost-build/latest/prost_build/
[snazzy]: https://github.com/danburkert/snazzy
### MSRV
`prost` follows the `tokio-rs` project's MSRV model and supports 1.70. For more
information on the tokio MSRV policy, see [here][tokio msrv].
[tokio msrv]: https://github.com/tokio-rs/tokio/#supported-rust-versions
## Generated Code
`prost` generates Rust code from source `.proto` files using the `proto2` or
`proto3` syntax. `prost`'s goal is to make the generated code as simple as
possible.
### `protoc`
With `prost-build` v0.11 release, `protoc` will be required to invoke
`compile_protos` (unless `skip_protoc` is enabled). Prost will no longer provide
bundled `protoc` or attempt to compile `protoc` for users. For install
instructions for `protoc`, please check out the [protobuf install] instructions.
[protobuf install]: https://github.com/protocolbuffers/protobuf#protobuf-compiler-installation
### Packages
Prost can now generate code for `.proto` files that don't have a package spec.
`prost` will translate the Protobuf package into
a Rust module. For example, given the `package` specifier:
[package]: https://developers.google.com/protocol-buffers/docs/proto#packages
```protobuf,ignore
package foo.bar;
```
All Rust types generated from the file will be in the `foo::bar` module.
### Messages
Given a simple message declaration:
```protobuf,ignore
// Sample message.
message Foo {
}
```
`prost` will generate the following Rust struct:
```rust,ignore
/// Sample message.
#[derive(Clone, Debug, PartialEq, Message)]
pub struct Foo {
}
```
### Fields
Fields in Protobuf messages are translated into Rust as public struct fields of the
corresponding type.
#### Scalar Values
Scalar value types are converted as follows:
| Protobuf Type | Rust Type |
| --- | --- |
| `double` | `f64` |
| `float` | `f32` |
| `int32` | `i32` |
| `int64` | `i64` |
| `uint32` | `u32` |
| `uint64` | `u64` |
| `sint32` | `i32` |
| `sint64` | `i64` |
| `fixed32` | `u32` |
| `fixed64` | `u64` |
| `sfixed32` | `i32` |
| `sfixed64` | `i64` |
| `bool` | `bool` |
| `string` | `String` |
| `bytes` | `Vec<u8>` |
#### Enumerations
All `.proto` enumeration types convert to the Rust `i32` type. Additionally,
each enumeration type gets a corresponding Rust `enum` type. For example, this
`proto` enum:
```protobuf,ignore
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
```
gets this corresponding Rust enum [^1]:
```rust,ignore
pub enum PhoneType {
Mobile = 0,
Home = 1,
Work = 2,
}
```
[^1]: Annotations have been elided for clarity. See below for a full example.
You can convert a `PhoneType` value to an `i32` by doing:
```rust,ignore
PhoneType::Mobile as i32
```
The `#[derive(::prost::Enumeration)]` annotation added to the generated
`PhoneType` adds these associated functions to the type:
```rust,ignore
impl PhoneType {
pub fn is_valid(value: i32) -> bool { ... }
#[deprecated]
pub fn from_i32(value: i32) -> Option<PhoneType> { ... }
}
```
It also adds an `impl TryFrom<i32> for PhoneType`, so you can convert an `i32` to its corresponding `PhoneType` value by doing,
for example:
```rust,ignore
let phone_type = 2i32;
match PhoneType::try_from(phone_type) {
Ok(PhoneType::Mobile) => ...,
Ok(PhoneType::Home) => ...,
Ok(PhoneType::Work) => ...,
Err(_) => ...,
}
```
Additionally, wherever a `proto` enum is used as a field in a `Message`, the
message will have 'accessor' methods to get/set the value of the field as the
Rust enum type. For instance, this proto `PhoneNumber` message that has a field
named `type` of type `PhoneType`:
```protobuf,ignore
message PhoneNumber {
string number = 1;
PhoneType type = 2;
}
```
will become the following Rust type [^2] with methods `type` and `set_type`:
```rust,ignore
pub struct PhoneNumber {
pub number: String,
pub r#type: i32, // the `r#` is needed because `type` is a Rust keyword
}
impl PhoneNumber {
pub fn r#type(&self) -> PhoneType { ... }
pub fn set_type(&mut self, value: PhoneType) { ... }
}
```
Note that the getter methods will return the Rust enum's default value if the
field has an invalid `i32` value.
The `enum` type isn't used directly as a field, because the Protobuf spec
mandates that enumerations values are 'open', and decoding unrecognized
enumeration values must be possible.
[^2]: Annotations have been elided for clarity. See below for a full example.
#### Field Modifiers
Protobuf scalar value and enumeration message fields can have a modifier
depending on the Protobuf version. Modifiers change the corresponding type of
the Rust field:
| `.proto` Version | Modifier | Rust Type |
| --- | --- | --- |
| `proto2` | `optional` | `Option<T>` |
| `proto2` | `required` | `T` |
| `proto3` | default | `T` for scalar types, `Option<T>` otherwise |
| `proto3` | `optional` | `Option<T>` |
| `proto2`/`proto3` | `repeated` | `Vec<T>` |
Note that in `proto3` the default representation for all user-defined message
types is `Option<T>`, and for scalar types just `T` (during decoding, a missing
value is populated by `T::default()`). If you need a witness of the presence of
a scalar type `T`, use the `optional` modifier to enforce an `Option<T>`
representation in the generated Rust struct.
#### Map Fields
Map fields are converted to a Rust `HashMap` with key and value type converted
from the Protobuf key and value types.
#### Message Fields
Message fields are converted to the corresponding struct type. The table of
field modifiers above applies to message fields, except that `proto3` message
fields without a modifier (the default) will be wrapped in an `Option`.
Typically message fields are unboxed. `prost` will automatically box a message
field if the field type and the parent type are recursively nested in order to
avoid an infinite sized struct.
#### Oneof Fields
Oneof fields convert to a Rust enum. Protobuf `oneof`s types are not named, so
`prost` uses the name of the `oneof` field for the resulting Rust enum, and
defines the enum in a module under the struct. For example, a `proto3` message
such as:
```protobuf,ignore
message Foo {
oneof widget {
int32 quux = 1;
string bar = 2;
}
}
```
generates the following Rust[^3]:
```rust,ignore
pub struct Foo {
pub widget: Option<foo::Widget>,
}
pub mod foo {
pub enum Widget {
Quux(i32),
Bar(String),
}
}
```
`oneof` fields are always wrapped in an `Option`.
[^3]: Annotations have been elided for clarity. See below for a full example.
### Services
`prost-build` allows a custom code-generator to be used for processing `service`
definitions. This can be used to output Rust traits according to an
application's specific needs.
### Generated Code Example
Example `.proto` file:
```protobuf,ignore
syntax = "proto3";
package tutorial;
message Person {
string name = 1;
int32 id = 2; // Unique ID number for this person.
string email = 3;
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
message PhoneNumber {
string number = 1;
PhoneType type = 2;
}
repeated PhoneNumber phones = 4;
}
// Our address book file is just one of these.
message AddressBook {
repeated Person people = 1;
}
```
and the generated Rust code (`tutorial.rs`):
```rust,ignore
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Person {
#[prost(string, tag="1")]
pub name: ::prost::alloc::string::String,
/// Unique ID number for this person.
#[prost(int32, tag="2")]
pub id: i32,
#[prost(string, tag="3")]
pub email: ::prost::alloc::string::String,
#[prost(message, repeated, tag="4")]
pub phones: ::prost::alloc::vec::Vec<person::PhoneNumber>,
}
/// Nested message and enum types in `Person`.
pub mod person {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhoneNumber {
#[prost(string, tag="1")]
pub number: ::prost::alloc::string::String,
#[prost(enumeration="PhoneType", tag="2")]
pub r#type: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum PhoneType {
Mobile = 0,
Home = 1,
Work = 2,
}
}
/// Our address book file is just one of these.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddressBook {
#[prost(message, repeated, tag="1")]
pub people: ::prost::alloc::vec::Vec<Person>,
}
```
## Accessing the `protoc` `FileDescriptorSet`
The `prost_build::Config::file_descriptor_set_path` option can be used to emit a file descriptor set
during the build & code generation step. When used in conjunction with the `std::include_bytes`
macro and the `prost_types::FileDescriptorSet` type, applications and libraries using Prost can
implement introspection capabilities requiring details from the original `.proto` files.
## Using `prost` in a `no_std` Crate
`prost` is compatible with `no_std` crates. To enable `no_std` support, disable
the `std` features in `prost` and `prost-types`:
```ignore
[dependencies]
prost = { version = "0.14.1", default-features = false, features = ["derive"] }
# Only necessary if using Protobuf well-known types:
prost-types = { version = "0.14.1", default-features = false }
```
Additionally, configure `prost-build` to output `BTreeMap`s instead of `HashMap`s
for all Protobuf `map` fields in your `build.rs`:
```rust,ignore
let mut config = prost_build::Config::new();
config.btree_map(&["."]);
```
When using edition 2015, it may be necessary to add an `extern crate core;`
directive to the crate which includes `prost`-generated code.
## Serializing Existing Types
`prost` uses a custom derive macro to handle encoding and decoding types, which
means that if your existing Rust type is compatible with Protobuf types, you can
serialize and deserialize it by adding the appropriate derive and field
annotations.
Currently the best documentation on adding annotations is to look at the
generated code examples above.
### Tag Inference for Existing Types
Prost automatically infers tags for the struct.
Fields are tagged sequentially in the order they
are specified, starting with `1`.
You may skip tags which have been reserved, or where there are gaps between
sequentially occurring tag values by specifying the tag number to skip to with
the `tag` attribute on the first field after the gap. The following fields will
be tagged sequentially starting from the next number.
```rust,ignore
use prost;
use prost::{Enumeration, Message};
#[derive(Clone, PartialEq, Message)]
struct Person {
#[prost(string, tag = "1")]
pub id: String, // tag=1
// NOTE: Old "name" field has been removed
// pub name: String, // tag=2 (Removed)
#[prost(string, tag = "6")]
pub given_name: String, // tag=6
#[prost(string)]
pub family_name: String, // tag=7
#[prost(string)]
pub formatted_name: String, // tag=8
#[prost(uint32, tag = "3")]
pub age: u32, // tag=3
#[prost(uint32)]
pub height: u32, // tag=4
#[prost(enumeration = "Gender")]
pub gender: i32, // tag=5
// NOTE: Skip to less commonly occurring fields
#[prost(string, tag = "16")]
pub name_prefix: String, // tag=16 (eg. mr/mrs/ms)
#[prost(string)]
pub name_suffix: String, // tag=17 (eg. jr/esq)
#[prost(string)]
pub maiden_name: String, // tag=18
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Enumeration)]
pub enum Gender {
Unknown = 0,
Female = 1,
Male = 2,
}
```
## Nix
The prost project maintains flakes support for local development. Once you have
nix and nix flakes setup you can just run `nix develop` to get a shell
configured with the required dependencies to compile the whole project.
## Feature Flags
- `std`: Enable integration with standard library. Disable this feature for `no_std` support. This feature is enabled by default.
- `derive`: Enable integration with `prost-derive`. Disable this feature to reduce compile times. This feature is enabled by default.
- `prost-derive`: Deprecated. Alias for `derive` feature.
- `no-recursion-limit`: Disable the recursion limit. The recursion limit is 100 and cannot be customized.
## FAQ
1. **Could `prost` be implemented as a serializer for [Serde](https://serde.rs/)?**
Probably not, however I would like to hear from a Serde expert on the matter.
There are two complications with trying to serialize Protobuf messages with
Serde:
- Protobuf fields require a numbered tag, and currently there appears to be no
mechanism suitable for this in `serde`.
- The mapping of Protobuf type to Rust type is not 1-to-1. As a result,
trait-based approaches to dispatching don't work very well. Example: six
different Protobuf field types correspond to a Rust `Vec<i32>`: `repeated
int32`, `repeated sint32`, `repeated sfixed32`, and their packed
counterparts.
But it is possible to place `serde` derive tags onto the generated types, so
the same structure can support both `prost` and `Serde`.
2. **I get errors when trying to run `cargo test` on MacOS**
If the errors are about missing `autoreconf` or similar, you can probably fix
them by running
```ignore
brew install automake
brew install libtool
```
## License
`prost` is distributed under the terms of the Apache License (Version 2.0).
See [LICENSE](https://github.com/tokio-rs/prost/blob/master/LICENSE) for details.
Copyright 2022 Dan Burkert & Tokio Contributors

View File

@@ -0,0 +1,366 @@
use core::borrow::Borrow;
use core::{fmt, ops, str};
use core::str::pattern::{Pattern, ReverseSearcher, Searcher as _};
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use bytes::Bytes;
/// Mirror of the private field layout of `bytes::Bytes`, used to rewrite a
/// `Bytes`'s ptr/len directly (see `ByteStr::slice_unchecked`).
///
/// NOTE(review): this must stay byte-for-byte in sync with the internal
/// layout of the pinned `bytes` version; a layout change upstream silently
/// breaks the transmutes in `BytesUnsafeView::from`/`to` — re-verify on
/// every `bytes` upgrade.
#[allow(unused)]
struct BytesUnsafeView {
    ptr: *const u8,
    len: usize,
    // inlined "trait object"
    data: core::sync::atomic::AtomicPtr<()>,
    vtable: &'static Vtable,
}
/// Mirror of the `bytes::Bytes` vtable: the function pointers a `Bytes`
/// value dispatches through for clone/convert/drop operations.
///
/// NOTE(review): like `BytesUnsafeView`, this must match the pinned `bytes`
/// version's internal vtable layout exactly — confirm on upgrade.
#[allow(unused)]
struct Vtable {
    /// fn(data, ptr, len)
    clone: unsafe fn(&core::sync::atomic::AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len)
    ///
    /// takes `Bytes` to value
    to_vec: unsafe fn(&core::sync::atomic::AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    to_mut: unsafe fn(&core::sync::atomic::AtomicPtr<()>, *const u8, usize) -> bytes::BytesMut,
    /// fn(data)
    is_unique: unsafe fn(&core::sync::atomic::AtomicPtr<()>) -> bool,
    /// fn(data, ptr, len)
    drop: unsafe fn(&mut core::sync::atomic::AtomicPtr<()>, *const u8, usize),
}
impl BytesUnsafeView {
#[inline]
const fn from(src: Bytes) -> Self { unsafe { ::core::intrinsics::transmute(src) } }
#[inline]
const fn to(self) -> Bytes { unsafe { ::core::intrinsics::transmute(self) } }
}
/// A UTF-8 string backed by `bytes::Bytes`, giving cheap reference-counted
/// clones and zero-copy sub-slicing.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct ByteStr {
    // Invariant: bytes contains valid UTF-8
    bytes: Bytes,
}
impl ByteStr {
    /// Creates an empty `ByteStr`.
    #[inline]
    pub fn new() -> ByteStr {
        ByteStr {
            // Invariant: the empty slice is trivially valid UTF-8.
            bytes: Bytes::new(),
        }
    }
    /// Creates a `ByteStr` that borrows a `'static` string; no allocation.
    #[inline]
    pub const fn from_static(val: &'static str) -> ByteStr {
        ByteStr {
            // Invariant: val is a str so contains valid UTF-8.
            bytes: Bytes::from_static(val.as_bytes()),
        }
    }
    #[inline]
    /// Wraps `bytes` as a `ByteStr` without validating it.
    ///
    /// ## Panics
    /// In a debug build this will panic if `bytes` is not valid UTF-8.
    ///
    /// ## Safety
    /// `bytes` must contain valid UTF-8. In a release build it is undefined
    /// behavior to call this with `bytes` that is not valid UTF-8.
    pub unsafe fn from_utf8_unchecked(bytes: Bytes) -> ByteStr {
        // Debug-only validation; release builds skip the check entirely.
        if cfg!(debug_assertions) {
            match str::from_utf8(&bytes.as_ref()) {
                Ok(_) => (),
                Err(err) => panic!(
                    "ByteStr::from_utf8_unchecked() with invalid bytes; error = {err}, bytes = {bytes:?}",
                ),
            }
        }
        // Invariant: assumed by the safety requirements of this function.
        ByteStr { bytes }
    }
    /// Wraps `bytes` as a `ByteStr`, returning an error if it is not UTF-8.
    #[inline(always)]
    pub fn from_utf8(bytes: Bytes) -> Result<ByteStr, str::Utf8Error> {
        str::from_utf8(&bytes)?;
        // Invariant: just checked is utf8
        Ok(ByteStr { bytes })
    }
    /// Length of the string in bytes (not characters).
    #[inline]
    pub const fn len(&self) -> usize { self.bytes.len() }
    /// The underlying byte buffer.
    #[must_use]
    #[inline(always)]
    pub const fn as_bytes(&self) -> &Bytes { &self.bytes }
    /// Zero-copy sub-slice sharing the same backing buffer.
    ///
    /// ## Safety
    /// `range` must be in bounds and lie on UTF-8 character boundaries;
    /// neither is checked. The result is produced by rewriting the raw
    /// ptr/len of a cloned `Bytes` through `BytesUnsafeView`, so it also
    /// depends on that view matching the `bytes` crate's internal layout.
    #[must_use]
    #[inline]
    pub unsafe fn slice_unchecked(&self, range: impl core::ops::RangeBounds<usize>) -> Self {
        use core::ops::Bound;
        let len = self.len();
        // Normalize the bounds into a half-open [begin, end) byte range.
        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };
        let end = match range.end_bound() {
            Bound::Included(&n) => n + 1,
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };
        if end == begin {
            // Empty range: return a fresh empty string, no refcount traffic.
            return ByteStr::new();
        }
        // The clone shares the buffer (refcount bump); then narrow the view.
        let mut ret = BytesUnsafeView::from(self.bytes.clone());
        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };
        Self { bytes: ret.to() }
    }
    /// Splits on the first match of `delimiter`; both halves share this
    /// string's buffer.
    #[inline]
    pub fn split_once<P: Pattern>(&self, delimiter: P) -> Option<(ByteStr, ByteStr)> {
        let (start, end) = delimiter.into_searcher(self).next_match()?;
        // SAFETY: `Searcher` is known to return valid indices.
        unsafe { Some((self.slice_unchecked(..start), self.slice_unchecked(end..))) }
    }
    /// Splits on the last match of `delimiter`; both halves share this
    /// string's buffer.
    #[inline]
    pub fn rsplit_once<P: Pattern>(&self, delimiter: P) -> Option<(ByteStr, ByteStr)>
    where for<'a> P::Searcher<'a>: ReverseSearcher<'a> {
        let (start, end) = delimiter.into_searcher(self).next_match_back()?;
        // SAFETY: `Searcher` is known to return valid indices.
        unsafe { Some((self.slice_unchecked(..start), self.slice_unchecked(end..))) }
    }
    /// Mutable access to the inner buffer.
    ///
    /// ## Safety
    /// The caller must keep the buffer valid UTF-8; writing non-UTF-8 bytes
    /// breaks the type invariant.
    #[must_use]
    #[inline(always)]
    pub const unsafe fn as_bytes_mut(&mut self) -> &mut Bytes { &mut self.bytes }
    /// Resets this string to empty.
    #[inline]
    pub fn clear(&mut self) { self.bytes.clear() }
}
// SAFETY: `Bytes` is `Send + Sync` and `ByteStr` adds no other state, so
// forwarding the markers is sound (they would also be auto-derived; kept
// explicit for clarity).
unsafe impl Send for ByteStr {}
unsafe impl Sync for ByteStr {}
impl Clone for ByteStr {
#[inline]
fn clone(&self) -> ByteStr { Self { bytes: self.bytes.clone() } }
}
// NOTE(review): `Buf` is forwarded verbatim to the inner `Bytes`, so calling
// `advance`/`copy_to_bytes` with a count that is not a character boundary
// leaves `self` starting mid-sequence — the UTF-8 invariant is not re-checked
// here. Confirm all callers advance by whole characters.
impl bytes::Buf for ByteStr {
    #[inline]
    fn remaining(&self) -> usize { self.bytes.remaining() }
    #[inline]
    fn chunk(&self) -> &[u8] { self.bytes.chunk() }
    #[inline]
    fn advance(&mut self, cnt: usize) { self.bytes.advance(cnt) }
    #[inline]
    fn copy_to_bytes(&mut self, len: usize) -> Bytes { self.bytes.copy_to_bytes(len) }
}
impl fmt::Debug for ByteStr {
    /// Formats exactly like `str`'s `Debug` (quoted and escaped).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s: &str = self;
        fmt::Debug::fmt(s, f)
    }
}
impl fmt::Display for ByteStr {
    /// Formats exactly like `str`'s `Display` (raw text).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s: &str = self;
        fmt::Display::fmt(s, f)
    }
}
impl ops::Deref for ByteStr {
    type Target = str;
    /// Zero-cost dereference to `str`, justified by the UTF-8 invariant.
    #[inline]
    fn deref(&self) -> &str {
        let b: &[u8] = self.bytes.as_ref();
        // Safety: the invariant of `bytes` is that it contains valid UTF-8.
        unsafe { str::from_utf8_unchecked(b) }
    }
}
impl AsRef<str> for ByteStr {
    /// Borrows the contents as `&str` (via `Deref`).
    #[inline]
    fn as_ref(&self) -> &str { &**self }
}
impl AsRef<[u8]> for ByteStr {
    /// Borrows the raw UTF-8 bytes.
    #[inline]
    fn as_ref(&self) -> &[u8] { &self.bytes[..] }
}
impl core::hash::Hash for ByteStr {
    /// Hashes exactly like the underlying `Bytes` content.
    #[inline]
    fn hash<H>(&self, state: &mut H)
    where H: core::hash::Hasher {
        core::hash::Hash::hash(&self.bytes, state)
    }
}
impl Borrow<str> for ByteStr {
    /// Borrows as `&str` (deref coercion).
    #[inline]
    fn borrow(&self) -> &str { self }
}
impl Borrow<[u8]> for ByteStr {
    /// Borrows the raw UTF-8 bytes.
    #[inline]
    fn borrow(&self) -> &[u8] { self.bytes.as_ref() }
}
impl PartialEq<str> for ByteStr {
#[inline]
fn eq(&self, other: &str) -> bool { &**self == other }
}
impl PartialEq<&str> for ByteStr {
#[inline]
fn eq(&self, other: &&str) -> bool { &**self == *other }
}
impl PartialEq<ByteStr> for str {
    #[inline]
    fn eq(&self, other: &ByteStr) -> bool { str::eq(self, other) }
}
impl PartialEq<ByteStr> for &str {
    #[inline]
    fn eq(&self, other: &ByteStr) -> bool { str::eq(self, other) }
}
impl PartialEq<String> for ByteStr {
#[inline]
fn eq(&self, other: &String) -> bool { &**self == other.as_str() }
}
impl PartialEq<&String> for ByteStr {
#[inline]
fn eq(&self, other: &&String) -> bool { &**self == other.as_str() }
}
impl PartialEq<ByteStr> for String {
    #[inline]
    fn eq(&self, other: &ByteStr) -> bool { str::eq(self, other) }
}
impl PartialEq<ByteStr> for &String {
    #[inline]
    fn eq(&self, other: &ByteStr) -> bool { str::eq(self, other) }
}
// impl From
impl Default for ByteStr {
#[inline]
fn default() -> ByteStr { ByteStr::new() }
}
impl From<String> for ByteStr {
    /// Takes over the `String`'s allocation without copying.
    #[inline]
    fn from(src: String) -> ByteStr {
        // Invariant: `src` is a `String`, hence valid UTF-8.
        let bytes = Bytes::from(src);
        ByteStr { bytes }
    }
}
impl<'a> From<&'a str> for ByteStr {
    /// Copies the borrowed string into fresh shared storage.
    #[inline]
    fn from(src: &'a str) -> ByteStr {
        // Invariant: `src` is a `str`, hence valid UTF-8.
        let bytes = Bytes::copy_from_slice(src.as_bytes());
        ByteStr { bytes }
    }
}
impl From<ByteStr> for Bytes {
#[inline(always)]
fn from(src: ByteStr) -> Self { src.bytes }
}
impl serde::Serialize for ByteStr {
    /// Serializes as a plain string.
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: serde::Serializer {
        let s: &str = self;
        serializer.serialize_str(s)
    }
}
/// Serde visitor building a [`ByteStr`] from strings, byte slices,
/// owned byte buffers, or sequences of bytes.
struct ByteStrVisitor;
impl<'de> serde::de::Visitor<'de> for ByteStrVisitor {
    type Value = ByteStr;
    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a UTF-8 string")
    }
    /// Borrowed str: copied into a fresh `ByteStr`.
    #[inline]
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where E: serde::de::Error {
        Ok(ByteStr::from(v))
    }
    /// Owned String: takes over the allocation without copying.
    #[inline]
    fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
    where E: serde::de::Error {
        Ok(ByteStr::from(v))
    }
    /// Raw bytes: accepted only when valid UTF-8.
    #[inline]
    fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
    where E: serde::de::Error {
        match str::from_utf8(v) {
            Ok(s) => Ok(ByteStr::from(s)),
            Err(e) => Err(E::custom(format_args!("invalid UTF-8: {e}"))),
        }
    }
    /// Owned byte buffer: reuses the allocation when valid UTF-8.
    #[inline]
    fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
    where E: serde::de::Error {
        match String::from_utf8(v) {
            Ok(s) => Ok(ByteStr::from(s)),
            Err(e) => Err(E::custom(format_args!("invalid UTF-8: {}", e.utf8_error()))),
        }
    }
    /// Sequence of byte elements (e.g. an array of numbers); the size hint is
    /// capped at 4096 so a hostile hint cannot force a huge preallocation.
    #[inline]
    fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
    where V: serde::de::SeqAccess<'de> {
        use serde::de::Error as _;
        let len = core::cmp::min(seq.size_hint().unwrap_or(0), 4096);
        let mut bytes: Vec<u8> = Vec::with_capacity(len);
        while let Some(value) = seq.next_element()? {
            bytes.push(value);
        }
        match String::from_utf8(bytes) {
            Ok(s) => Ok(ByteStr::from(s)),
            Err(e) => Err(V::Error::custom(format_args!("invalid UTF-8: {}", e.utf8_error()))),
        }
    }
}
impl<'de> serde::Deserialize<'de> for ByteStr {
    /// Deserializes via [`ByteStrVisitor`]; `deserialize_string` lets owned
    /// formats hand their buffer over without copying.
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<ByteStr, D::Error>
    where D: serde::Deserializer<'de> {
        deserializer.deserialize_string(ByteStrVisitor)
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,31 @@
use ::bytes::{Buf, BufMut};
use super::wire_type::WireType;
use crate::error::DecodeError;
use alloc::string::ToString as _;
/// Generates a module (`float`, `fixed32`, …) with encode/decode helpers for
/// one fixed-width protobuf scalar: `$ty` is the Rust type, `$wire_type` the
/// protobuf wire type, and `$put`/`$try_get` the little-endian
/// `BufMut`/`Buf` accessors the helpers delegate to.
macro_rules! fixed {
    ($ty:ty, $proto_ty:ident, $wire_type:ident, $put:ident, $try_get:ident) => {
        pub mod $proto_ty {
            use super::*;
            pub const WIRE_TYPE: WireType = WireType::$wire_type;
            pub const SIZE: usize = core::mem::size_of::<$ty>();
            #[inline(always)]
            pub fn encode_fixed(value: $ty, buf: &mut impl BufMut) { buf.$put(value); }
            #[inline(always)]
            pub fn decode_fixed(buf: &mut impl Buf) -> Result<$ty, DecodeError> {
                buf.$try_get().map_err(|e| DecodeError::new(e.to_string()))
            }
        }
    };
}
// One module per fixed-width protobuf scalar:
// (rust type, proto name, wire type, LE writer, LE reader).
fixed!(f32, float, ThirtyTwoBit, put_f32_le, try_get_f32_le);
fixed!(f64, double, SixtyFourBit, put_f64_le, try_get_f64_le);
fixed!(u32, fixed32, ThirtyTwoBit, put_u32_le, try_get_u32_le);
fixed!(u64, fixed64, SixtyFourBit, put_u64_le, try_get_u64_le);
fixed!(i32, sfixed32, ThirtyTwoBit, put_i32_le, try_get_i32_le);
fixed!(i64, sfixed64, SixtyFourBit, put_i64_le, try_get_i64_le);

View File

@@ -0,0 +1,46 @@
pub use crate::{
error::{DecodeError, EncodeError, UnknownEnumValue},
message::Message,
// name::Name,
};
use ::bytes::{Buf, BufMut};
use crate::encoding::varint::usize::{decode_varint, encode_varint, encoded_len_varint};
/// Encodes a length delimiter (a varint) to the buffer.
///
/// See [Message.encode_length_delimited] for more info.
///
/// Returns an error when the buffer lacks the capacity for the full
/// delimiter; in that case nothing is written.
pub fn encode_length_delimiter(length: usize, buf: &mut impl BufMut) -> Result<(), EncodeError> {
    let needed = encoded_len_varint(length);
    let available = buf.remaining_mut();
    if available < needed {
        return Err(EncodeError::new(needed, available));
    }
    encode_varint(length, buf);
    Ok(())
}
/// Returns the encoded length of a length delimiter.
///
/// Applications may use this method to ensure sufficient buffer capacity before calling
/// `encode_length_delimiter`. The returned size will be between 1 and 10, inclusive.
/// (On targets where `usize` is 32 bits the maximum is 5.)
#[inline]
pub fn length_delimiter_len(length: usize) -> usize { encoded_len_varint(length) }
/// Decodes a length delimiter from the buffer.
///
/// This method allows the length delimiter to be decoded independently of the message, when the
/// message is encoded with [Message.encode_length_delimited].
/// On success the buffer is advanced past the delimiter only.
///
/// An error may be returned in two cases:
///
/// * If the supplied buffer contains fewer than 10 bytes, then an error indicates that more
/// input is required to decode the full delimiter.
/// * If the supplied buffer contains 10 bytes or more, then the buffer contains an invalid
/// delimiter, and typically the buffer should be considered corrupt.
#[inline]
pub fn decode_length_delimiter(mut buf: impl Buf) -> Result<usize, DecodeError> { decode_varint(&mut buf) }

View File

@@ -0,0 +1,216 @@
#![allow(unused)]
mod ascii;
#[cfg(any(
target_feature = "sse2",
all(target_endian = "little", target_arch = "aarch64"),
all(target_endian = "little", target_feature = "neon")
))]
mod simd_funcs;
use ascii::validate_ascii;
use ::core::intrinsics::likely;
#[inline(always)]
/// Branch-free `start <= i && i <= end` for bytes; requires `start <= end`.
fn in_inclusive_range8(i: u8, start: u8, end: u8) -> bool {
    // Shift the range down to begin at zero: values below `start` wrap to
    // large bytes, so one unsigned comparison covers both bounds.
    let shifted = i.wrapping_sub(start);
    shifted <= end - start
}
#[repr(align(64))] // Align to cache lines
pub struct Utf8Data {
    // 384-entry classification table: indices 0..256 classify a sequence's
    // second (continuation) byte; indices 256..384 — addressed as
    // `lead as usize + 0x80` for leads >= 0x80 — classify the lead byte.
    // ANDing the two entries validates a (lead, second) pair in one step
    // (see `utf8_valid_up_to`).
    pub table: [u8; 384],
}
// BEGIN GENERATED CODE. PLEASE DO NOT EDIT.
// Instead, please regenerate using generate-encoding-data.py
pub static UTF8_DATA: Utf8Data = Utf8Data {
table: [
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 148, 148, 148,
148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 164, 164, 164, 164, 164,
164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164,
164, 164, 164, 164, 164, 164, 164, 164, 164, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 16, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 32, 8, 8, 64, 8, 8, 8, 128, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
],
};
// END GENERATED CODE
/// Returns the length (in bytes) of the longest prefix of `src` that is
/// valid UTF-8; equals `src.len()` when the whole slice is valid.
///
/// Strategy: ASCII runs are skipped with `validate_ascii`; multi-byte
/// sequences are classified via the `UTF8_DATA` lookup table. The main
/// `'inner` loop requires a 4-byte lookahead so all sequence lengths can be
/// probed without per-byte bound checks; the final 1–3 bytes fall through to
/// the bounds-checked `'tail` loop.
pub fn utf8_valid_up_to(src: &[u8]) -> usize {
    let mut read = 0;
    'outer: loop {
        let mut byte = {
            let src_remaining = &src[read..];
            match validate_ascii(src_remaining) {
                None => {
                    return src.len();
                }
                Some((non_ascii, consumed)) => {
                    read += consumed;
                    non_ascii
                }
            }
        };
        // Check for the longest sequence to avoid checking twice for the
        // multi-byte sequences. This can't overflow with 64-bit address space,
        // because full 64 bits aren't in use. In the 32-bit PAE case, for this
        // to overflow would mean that the source slice would be so large that
        // the address space of the process would not have space for any code.
        // Therefore, the slice cannot be so long that this would overflow.
        if likely(read + 4 <= src.len()) {
            'inner: loop {
                // At this point, `byte` is not included in `read`, because we
                // don't yet know that a) the UTF-8 sequence is valid and b) that there
                // is output space if it is an astral sequence.
                // Inspecting the lead byte directly is faster than what the
                // std lib does!
                if likely(in_inclusive_range8(byte, 0xC2, 0xDF)) {
                    // Two-byte
                    let second = unsafe { *(src.get_unchecked(read + 1)) };
                    if !in_inclusive_range8(second, 0x80, 0xBF) {
                        break 'outer;
                    }
                    read += 2;
                    // Next lead (manually inlined)
                    if likely(read + 4 <= src.len()) {
                        byte = unsafe { *(src.get_unchecked(read)) };
                        if byte < 0x80 {
                            read += 1;
                            continue 'outer;
                        }
                        continue 'inner;
                    }
                    break 'inner;
                }
                if likely(byte < 0xF0) {
                    'three: loop {
                        // Three-byte
                        let second = unsafe { *(src.get_unchecked(read + 1)) };
                        let third = unsafe { *(src.get_unchecked(read + 2)) };
                        if ((UTF8_DATA.table[usize::from(second)]
                            & unsafe { *(UTF8_DATA.table.get_unchecked(byte as usize + 0x80)) })
                            | (third >> 6))
                            != 2
                        {
                            break 'outer;
                        }
                        read += 3;
                        // Next lead (manually inlined)
                        if likely(read + 4 <= src.len()) {
                            byte = unsafe { *(src.get_unchecked(read)) };
                            if in_inclusive_range8(byte, 0xE0, 0xEF) {
                                continue 'three;
                            }
                            if likely(byte < 0x80) {
                                read += 1;
                                continue 'outer;
                            }
                            continue 'inner;
                        }
                        break 'inner;
                    }
                }
                // Four-byte
                let second = unsafe { *(src.get_unchecked(read + 1)) };
                let third = unsafe { *(src.get_unchecked(read + 2)) };
                let fourth = unsafe { *(src.get_unchecked(read + 3)) };
                if (u16::from(
                    UTF8_DATA.table[usize::from(second)]
                        & unsafe { *(UTF8_DATA.table.get_unchecked(byte as usize + 0x80)) },
                ) | u16::from(third >> 6)
                    | (u16::from(fourth & 0xC0) << 2))
                    != 0x202
                {
                    break 'outer;
                }
                read += 4;
                // Next lead
                if likely(read + 4 <= src.len()) {
                    byte = unsafe { *(src.get_unchecked(read)) };
                    if byte < 0x80 {
                        read += 1;
                        continue 'outer;
                    }
                    continue 'inner;
                }
                break 'inner;
            }
        }
        // We can't have a complete 4-byte sequence, but we could still have
        // one to three shorter sequences.
        'tail: loop {
            // >= is better for bound check elision than ==
            if read >= src.len() {
                break 'outer;
            }
            byte = src[read];
            // At this point, `byte` is not included in `read`, because we
            // don't yet know that a) the UTF-8 sequence is valid and b) that there
            // is output space if it is an astral sequence.
            // Inspecting the lead byte directly is faster than what the
            // std lib does!
            if byte < 0x80 {
                read += 1;
                continue 'tail;
            }
            if in_inclusive_range8(byte, 0xC2, 0xDF) {
                // Two-byte
                let new_read = read + 2;
                if new_read > src.len() {
                    break 'outer;
                }
                let second = src[read + 1];
                if !in_inclusive_range8(second, 0x80, 0xBF) {
                    break 'outer;
                }
                read += 2;
                continue 'tail;
            }
            // We need to exclude valid four byte lead bytes, because
            // `UTF8_DATA.second_mask` covers
            if byte < 0xF0 {
                // Three-byte
                let new_read = read + 3;
                if new_read > src.len() {
                    break 'outer;
                }
                let second = src[read + 1];
                let third = src[read + 2];
                if ((UTF8_DATA.table[usize::from(second)]
                    & unsafe { *(UTF8_DATA.table.get_unchecked(byte as usize + 0x80)) })
                    | (third >> 6))
                    != 2
                {
                    break 'outer;
                }
                read += 3;
                // `'tail` handles sequences shorter than 4, so
                // there can't be another sequence after this one.
                break 'outer;
            }
            break 'outer;
        }
    }
    read
}
/// Returns `true` when `v` is valid UTF-8 in its entirety.
#[inline(always)]
pub fn is_valid_utf8(v: &[u8]) -> bool { utf8_valid_up_to(v) == v.len() }
/// Backward-compatible alias kept for existing callers (note the historical
/// typo in the name); prefer [`is_valid_utf8`].
#[inline(always)]
pub fn is_vaild_utf8(v: &[u8]) -> bool { is_valid_utf8(v) }

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,347 @@
// Copyright Mozilla Foundation. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use any_all_workaround::all_mask16x8;
use any_all_workaround::all_mask8x16;
use any_all_workaround::any_mask16x8;
use any_all_workaround::any_mask8x16;
use core::simd::cmp::SimdPartialEq;
use core::simd::cmp::SimdPartialOrd;
use core::simd::simd_swizzle;
use core::simd::u16x8;
use core::simd::u8x16;
use core::simd::ToBytes;
use cfg_if::cfg_if;
// TODO: Migrate unaligned access to stdlib code if/when the RFC
// https://github.com/rust-lang/rfcs/pull/1725 is implemented.
/// Loads 16 bytes into a `u8x16` from a possibly unaligned pointer.
///
/// Safety invariant: ptr must be valid for an unaligned read of 16 bytes
#[inline(always)]
pub unsafe fn load16_unaligned(ptr: *const u8) -> u8x16 {
    // `read_unaligned` has exactly the semantics of the old
    // MaybeUninit + copy_nonoverlapping dance, with no uninit handling.
    ptr.cast::<u8x16>().read_unaligned()
}
/// Loads 16 bytes into a `u8x16` from an aligned pointer.
///
/// Safety invariant: ptr must be valid for an aligned-for-u8x16 read of 16 bytes
#[allow(dead_code)]
#[inline(always)]
pub unsafe fn load16_aligned(ptr: *const u8) -> u8x16 {
    ptr.cast::<u8x16>().read()
}
/// Stores a `u8x16` to a possibly unaligned pointer.
///
/// Safety invariant: ptr must be valid for an unaligned store of 16 bytes
#[inline(always)]
pub unsafe fn store16_unaligned(ptr: *mut u8, s: u8x16) {
    // Same semantics as the old copy_nonoverlapping of 16 bytes.
    ptr.cast::<u8x16>().write_unaligned(s);
}
/// Stores a `u8x16` to an aligned pointer.
///
/// Safety invariant: ptr must be valid for an aligned-for-u8x16 store of 16 bytes
#[allow(dead_code)]
#[inline(always)]
pub unsafe fn store16_aligned(ptr: *mut u8, s: u8x16) {
    ptr.cast::<u8x16>().write(s);
}
/// Loads 8 `u16`s (16 bytes) into a `u16x8` from a possibly unaligned pointer.
///
/// Safety invariant: ptr must be valid for an unaligned read of 16 bytes
#[inline(always)]
pub unsafe fn load8_unaligned(ptr: *const u16) -> u16x8 {
    // `read_unaligned` copies size_of::<u16x8>() == 16 bytes, matching the
    // old MaybeUninit + copy_nonoverlapping implementation.
    ptr.cast::<u16x8>().read_unaligned()
}
/// Loads 8 `u16`s into a `u16x8` from an aligned pointer.
///
/// Safety invariant: ptr must be valid for an aligned-for-u16x8 read of 16 bytes
#[allow(dead_code)]
#[inline(always)]
pub unsafe fn load8_aligned(ptr: *const u16) -> u16x8 {
    ptr.cast::<u16x8>().read()
}
/// Stores a `u16x8` (16 bytes) to a possibly unaligned pointer.
///
/// Safety invariant: ptr must be valid for an unaligned store of 16 bytes
#[inline(always)]
pub unsafe fn store8_unaligned(ptr: *mut u16, s: u16x8) {
    // Same semantics as the old copy_nonoverlapping of 16 bytes.
    ptr.cast::<u16x8>().write_unaligned(s);
}
/// Stores a `u16x8` to an aligned pointer.
///
/// Safety invariant: ptr must be valid for an aligned-for-u16x8 store of 16 bytes
#[allow(dead_code)]
#[inline(always)]
pub unsafe fn store8_aligned(ptr: *mut u16, s: u16x8) {
    ptr.cast::<u16x8>().write(s);
}
// Platform-specific intrinsics for the specialized paths below: SSE2
// movemask/saturating-pack on x86/x86_64, horizontal-max reductions on
// aarch64; other targets use the portable fallbacks.
cfg_if! {
    if #[cfg(all(target_feature = "sse2", target_arch = "x86_64"))] {
        use core::arch::x86_64::_mm_movemask_epi8;
        use core::arch::x86_64::_mm_packus_epi16;
    } else if #[cfg(all(target_feature = "sse2", target_arch = "x86"))] {
        use core::arch::x86::_mm_movemask_epi8;
        use core::arch::x86::_mm_packus_epi16;
    } else if #[cfg(target_arch = "aarch64")]{
        use core::arch::aarch64::vmaxvq_u8;
        use core::arch::aarch64::vmaxvq_u16;
    } else {
    }
}
// #[inline(always)]
// fn simd_byte_swap_u8(s: u8x16) -> u8x16 {
// unsafe {
// shuffle!(s, s, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
// }
// }
// #[inline(always)]
// pub fn simd_byte_swap(s: u16x8) -> u16x8 {
// to_u16_lanes(simd_byte_swap_u8(to_u8_lanes(s)))
// }
#[inline(always)]
/// Swaps the two bytes of every `u16` lane (per-lane endianness flip).
pub fn simd_byte_swap(s: u16x8) -> u16x8 {
    (s << 8) | (s >> 8)
}
#[inline(always)]
/// Bit-casts 16 `u8` lanes to 8 native-endian `u16` lanes (no data movement).
pub fn to_u16_lanes(s: u8x16) -> u16x8 {
    u16x8::from_ne_bytes(s)
}
// SSE2 only: returns the per-byte high-bit mask (bit i set <=> byte i has its
// top bit set), so the caller can learn *where* the first non-ASCII byte is
// from a single movemask instead of just whether one exists.
cfg_if! {
    if #[cfg(target_feature = "sse2")] {
        // Expose low-level mask instead of higher-level conclusion,
        // because the non-ASCII case would perform less well otherwise.
        // Safety-usable invariant: This returned value is whether each high bit is set
        #[inline(always)]
        pub fn mask_ascii(s: u8x16) -> i32 {
            unsafe {
                _mm_movemask_epi8(s.into())
            }
        }
    } else {
    }
}
// `simd_is_ascii`: true iff every byte lane is < 0x80, with a per-platform
// fastest formulation (movemask on SSE2, horizontal max on aarch64).
cfg_if! {
    if #[cfg(target_feature = "sse2")] {
        #[inline(always)]
        pub fn simd_is_ascii(s: u8x16) -> bool {
            unsafe {
                // Safety: We have cfg()d the correct platform
                _mm_movemask_epi8(s.into()) == 0
            }
        }
    } else if #[cfg(target_arch = "aarch64")]{
        #[inline(always)]
        pub fn simd_is_ascii(s: u8x16) -> bool {
            unsafe {
                // Safety: We have cfg()d the correct platform
                vmaxvq_u8(s.into()) < 0x80
            }
        }
    } else {
        #[inline(always)]
        pub fn simd_is_ascii(s: u8x16) -> bool {
            // This optimizes better on ARM than
            // the lt formulation.
            let highest_ascii = u8x16::splat(0x7F);
            !any_mask8x16(s.simd_gt(highest_ascii))
        }
    }
}
// `simd_is_str_latin1`: true iff every byte is < 0xC4 (the SSE2 variant takes
// an ASCII shortcut first since it already has a cheap all-ASCII test).
cfg_if! {
    if #[cfg(target_feature = "sse2")] {
        #[inline(always)]
        pub fn simd_is_str_latin1(s: u8x16) -> bool {
            if simd_is_ascii(s) {
                return true;
            }
            let above_str_latin1 = u8x16::splat(0xC4);
            s.simd_lt(above_str_latin1).all()
        }
    } else if #[cfg(target_arch = "aarch64")]{
        #[inline(always)]
        pub fn simd_is_str_latin1(s: u8x16) -> bool {
            unsafe {
                // Safety: We have cfg()d the correct platform
                vmaxvq_u8(s.into()) < 0xC4
            }
        }
    } else {
        #[inline(always)]
        pub fn simd_is_str_latin1(s: u8x16) -> bool {
            let above_str_latin1 = u8x16::splat(0xC4);
            all_mask8x16(s.simd_lt(above_str_latin1))
        }
    }
}
// Lane-wise upper-bound checks on u16 lanes: `simd_is_basic_latin` tests
// every lane < 0x80, `simd_is_latin1` tests every lane < 0x100; aarch64 uses
// a horizontal max instead of a mask reduction.
cfg_if! {
    if #[cfg(target_arch = "aarch64")]{
        #[inline(always)]
        pub fn simd_is_basic_latin(s: u16x8) -> bool {
            unsafe {
                // Safety: We have cfg()d the correct platform
                vmaxvq_u16(s.into()) < 0x80
            }
        }
        #[inline(always)]
        pub fn simd_is_latin1(s: u16x8) -> bool {
            unsafe {
                // Safety: We have cfg()d the correct platform
                vmaxvq_u16(s.into()) < 0x100
            }
        }
    } else {
        #[inline(always)]
        pub fn simd_is_basic_latin(s: u16x8) -> bool {
            let above_ascii = u16x8::splat(0x80);
            all_mask16x8(s.simd_lt(above_ascii))
        }
        #[inline(always)]
        pub fn simd_is_latin1(s: u16x8) -> bool {
            // For some reason, on SSE2 this formulation
            // seems faster in this case while the above
            // function is better the other way round...
            let highest_latin1 = u16x8::splat(0xFF);
            !any_mask16x8(s.simd_gt(highest_latin1))
        }
    }
}
#[inline(always)]
/// Returns `true` if any lane is a UTF-16 surrogate code unit
/// (`0xD800..=0xDFFF`, i.e. top five bits equal to `0b11011`).
pub fn contains_surrogates(s: u16x8) -> bool {
    let top_bits = s & u16x8::splat(0xF800);
    any_mask16x8(top_bits.simd_eq(u16x8::splat(0xD800)))
}
// Early-exit helpers for `is_u16x8_bidi`: on aarch64 a horizontal max
// cheaply rejects vectors entirely below U+0590; on other targets the
// all-lanes mask test fills the same role. Exactly one of the two macros is
// a no-op per platform.
cfg_if! {
    if #[cfg(target_arch = "aarch64")]{
        macro_rules! aarch64_return_false_if_below_hebrew {
            ($s:ident) => ({
                unsafe {
                    // Safety: We have cfg()d the correct platform
                    if vmaxvq_u16($s.into()) < 0x0590 {
                        return false;
                    }
                }
            })
        }
        macro_rules! non_aarch64_return_false_if_all {
            ($s:ident) => ()
        }
    } else {
        macro_rules! aarch64_return_false_if_below_hebrew {
            ($s:ident) => ()
        }
        macro_rules! non_aarch64_return_false_if_all {
            ($s:ident) => ({
                if all_mask16x8($s) {
                    return false;
                }
            })
        }
    }
}
/// Lane-wise `$start <= lane < $end` using one wrapping subtract plus an
/// unsigned compare (callers must pass `$start <= $end`).
macro_rules! in_range16x8 {
    ($s:ident, $start:expr, $end:expr) => {{
        // SIMD sub is wrapping
        ($s - u16x8::splat($start)).simd_lt(u16x8::splat($end - $start))
    }};
}
#[inline(always)]
/// Returns `true` if any lane falls in a range that indicates (or, via
/// surrogate pairs, may encode) right-to-left text, or is one of the RTL
/// bidi control characters (U+200F, U+202B, U+202E, U+2067).
pub fn is_u16x8_bidi(s: u16x8) -> bool {
    // We try to first quickly refute the RTLness of the vector. If that
    // fails, we do the real RTL check, so in that case we end up wasting
    // the work for the up-front quick checks. Even the quick-check is
    // two-fold in order to return `false` ASAP if everything is below
    // Hebrew.
    aarch64_return_false_if_below_hebrew!(s);
    let below_hebrew = s.simd_lt(u16x8::splat(0x0590));
    non_aarch64_return_false_if_all!(below_hebrew);
    if all_mask16x8(
        below_hebrew | in_range16x8!(s, 0x0900, 0x200F) | in_range16x8!(s, 0x2068, 0xD802),
    ) {
        return false;
    }
    // Quick refutation failed. Let's do the full check.
    any_mask16x8(
        (in_range16x8!(s, 0x0590, 0x0900)
            | in_range16x8!(s, 0xFB1D, 0xFE00)
            | in_range16x8!(s, 0xFE70, 0xFEFF)
            | in_range16x8!(s, 0xD802, 0xD804)
            | in_range16x8!(s, 0xD83A, 0xD83C)
            | s.simd_eq(u16x8::splat(0x200F))
            | s.simd_eq(u16x8::splat(0x202B))
            | s.simd_eq(u16x8::splat(0x202E))
            | s.simd_eq(u16x8::splat(0x2067))),
    )
}
#[inline(always)]
/// Widens 16 `u8` lanes to `u16`, returning the low and high halves.
/// NOTE(review): the swizzle interleaves each byte with 0x00 and
/// reinterprets native-endian, which is a zero-extend on little-endian
/// targets — the only ones this module is compiled for (see the `cfg` gate
/// on the module declaration).
pub fn simd_unpack(s: u8x16) -> (u16x8, u16x8) {
    let first: u8x16 = simd_swizzle!(
        s,
        u8x16::splat(0),
        [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
    );
    let second: u8x16 = simd_swizzle!(
        s,
        u8x16::splat(0),
        [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
    );
    (u16x8::from_ne_bytes(first), u16x8::from_ne_bytes(second))
}
// Narrows two u16x8 vectors into one u8x16. NOTE(review): the SSE2 path
// saturates (`packus`: lanes > 0xFF clamp to 0xFF) while the portable path
// truncates to the low byte — they agree only when every lane is <= 0xFF;
// presumably callers guarantee that. Confirm before relying on out-of-range
// behavior.
cfg_if! {
    if #[cfg(target_feature = "sse2")] {
        #[inline(always)]
        pub fn simd_pack(a: u16x8, b: u16x8) -> u8x16 {
            unsafe {
                // Safety: We have cfg()d the correct platform
                _mm_packus_epi16(a.into(), b.into()).into()
            }
        }
    } else {
        #[inline(always)]
        pub fn simd_pack(a: u16x8, b: u16x8) -> u8x16 {
            let first: u8x16 = a.to_ne_bytes();
            let second: u8x16 = b.to_ne_bytes();
            simd_swizzle!(
                first,
                second,
                [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
            )
        }
    }
}

View File

@@ -0,0 +1,667 @@
#![allow(unsafe_op_in_unsafe_fn)]
use ::bytes::{Buf, BufMut};
use ::core::intrinsics::{assume, likely, unchecked_shl, unchecked_shr, unlikely};
use crate::error::DecodeError;
/// ZigZag-encodes a 32-bit signed integer so small magnitudes map to small
/// unsigned values: 0 → 0, -1 → 1, 1 → 2, -2 → 3, …
#[inline(always)]
pub const fn encode_zigzag32(value: i32) -> u32 {
    // Shift amounts are in-range constants, so plain `<<`/`>>` are fully
    // defined (`>>` is arithmetic on i32) — no unsafe intrinsics needed;
    // the bit pattern is identical to the old unchecked_shl/shr version.
    ((value << 1) ^ (value >> 31)) as u32
}
/// Decodes a ZigZag-encoded 32-bit integer (inverse of `encode_zigzag32`).
#[inline(always)]
pub const fn decode_zigzag32(value: u32) -> i32 {
    // The low bit selects the sign; the remaining bits are the magnitude.
    // Plain shifts on in-range constant amounts are fully defined, so the
    // unsafe intrinsics of the previous version are unnecessary.
    ((value >> 1) as i32) ^ -((value & 1) as i32)
}
/// ZigZag-encodes a 64-bit signed integer so small magnitudes map to small
/// unsigned values: 0 → 0, -1 → 1, 1 → 2, -2 → 3, …
#[inline(always)]
pub const fn encode_zigzag64(value: i64) -> u64 {
    // In-range constant shift amounts make plain `<<`/`>>` fully defined
    // (`>>` is arithmetic on i64); bit-identical to the unchecked version.
    ((value << 1) ^ (value >> 63)) as u64
}
/// Decodes a ZigZag-encoded 64-bit integer (inverse of `encode_zigzag64`).
#[inline(always)]
pub const fn decode_zigzag64(value: u64) -> i64 {
    // The low bit selects the sign; the remaining bits are the magnitude.
    ((value >> 1) as i64) ^ -((value & 1) as i64)
}
/// The maximum number of bytes a Protobuf Varint can occupy.
/// (`ceil(64 / 7)` = 10 for a `u64` payload.)
const VARINT64_MAX_LEN: usize = 10;
/// Encodes an integer value into LEB128 variable length format, and writes it to the buffer.
///
/// Dispatches to a fast path if the buffer has enough contiguous space,
/// otherwise falls back to a slower, byte-by-byte write.
///
/// Returns the number of bytes written (1..=10).
#[inline]
pub fn encode_varint64(value: u64, buf: &mut impl BufMut) -> usize {
    let len = encoded_len_varint64(value);
    // If there is enough contiguous space, use the optimized path.
    if likely(buf.chunk_mut().len() >= len) {
        // Safety: The check above guarantees `buf.chunk_mut()` has at least `len` bytes.
        unsafe { encode_varint64_fast(value, len, buf) };
    } else {
        encode_varint64_slow(value, len, buf);
    }
    len
}
/// Fast-path for encoding to a contiguous buffer slice.
///
/// ## Safety
///
/// The caller must ensure `buf.chunk_mut().len() >= len`.
#[inline(always)]
unsafe fn encode_varint64_fast(mut value: u64, len: usize, buf: &mut impl BufMut) {
    // Raw-pointer writes into the (possibly uninitialized) chunk; every byte
    // in `..len` is written before `advance_mut` marks them as initialized.
    let ptr = buf.chunk_mut().as_mut_ptr();
    for i in 0..(len - 1) {
        *ptr.add(i) = (value & 0x7F) as u8 | 0x80;
        value >>= 7;
    }
    // After the loop, `value` holds the last byte, which must not have the continuation bit.
    // The `encoded_len_varint` logic guarantees this.
    assume(value < 0x80);
    *ptr.add(len - 1) = value as u8;
    // Notify the buffer that `len` bytes have been written.
    buf.advance_mut(len);
}
/// Slow-path encoding for buffers that may not be contiguous.
/// (`#[cold]` + `#[inline(never)]` keep it out of the hot path's codegen.)
#[cold]
#[inline(never)]
fn encode_varint64_slow(mut value: u64, len: usize, buf: &mut impl BufMut) {
    for _ in 0..(len - 1) {
        buf.put_u8((value & 0x7F) as u8 | 0x80);
        value >>= 7;
    }
    // After the loop, `value` holds the last byte, which must not have the continuation bit.
    // The `encoded_len_varint` logic guarantees this.
    unsafe { assume(value < 0x80) };
    buf.put_u8(value as u8);
}
/// Returns the encoded length of the value in LEB128 variable length format.
/// The returned value will be between 1 and 10, inclusive.
#[inline]
pub const fn encoded_len_varint64(value: u64) -> usize {
    // Number of significant bits (0 for value == 0).
    let bit_width = 64 - value.leading_zeros();
    // `(bit_width * 9) >> 6` computes `max(ceil(bit_width / 7) - 1, 0)`
    // exactly for 0..=64; the `+ 1` makes value == 0 take one byte. Same
    // arithmetic as the previous nightly `bit_width`/unchecked version, but
    // on stable and without `unsafe`.
    ((bit_width * 9) >> 6) as usize + 1
}
/// Decodes a LEB128-encoded variable length integer from the buffer.
///
/// Errors when the buffer ends mid-varint or the encoding is overlong
/// (more than ten bytes / overflows a `u64`).
#[inline]
pub fn decode_varint64(buf: &mut impl Buf) -> Result<u64, DecodeError> {
    // `#[inline(always)]` for consistency with the `decode_varint32` twin.
    #[inline(always)]
    fn inner(buf: &mut impl Buf) -> Option<u64> {
        let bytes = buf.chunk();
        let len = bytes.len();
        if unlikely(len == 0) {
            return None;
        }
        // Fast path for single-byte varints.
        let first = unsafe { *bytes.get_unchecked(0) };
        if likely(first < 0x80) {
            buf.advance(1);
            return Some(first as _);
        }
        // If the chunk is large enough or the varint is known to terminate within it,
        // use the fast path which operates on a slice.
        if likely(len >= VARINT64_MAX_LEN || bytes[len - 1] < 0x80) {
            return decode_varint64_fast(bytes).map(|(value, advance)| {
                buf.advance(advance);
                value
            });
        }
        // Fallback for varints that cross chunk boundaries.
        decode_varint64_slow(buf)
    }
    // `ok_or_else` so the error (and its message string) is only built on
    // the failure path, not eagerly on every successful decode.
    inner(buf).ok_or_else(|| DecodeError::new("invalid varint64"))
}
/// Fast-path decoding of a varint from a contiguous memory slice.
///
/// The dispatcher guarantees `bytes` either contains the terminating byte
/// (high bit clear) or is at least `VARINT64_MAX_LEN` bytes long. Iteration
/// is bounds-checked here, so even a violated invariant yields `None`
/// instead of an out-of-bounds read (the previous raw-pointer loop was UB in
/// that case); with the invariant upheld the results are identical.
#[inline(always)]
fn decode_varint64_fast(bytes: &[u8]) -> Option<(u64, usize)> {
    let mut value = 0u64;
    for (i, &byte) in bytes.iter().take(VARINT64_MAX_LEN).enumerate() {
        value |= ((byte & 0x7F) as u64) << (i * 7);
        if byte < 0x80 {
            // The 10th byte may only carry the final 64 - 63 = 1 bit;
            // anything larger is an overlong/overflowing encoding.
            if i == VARINT64_MAX_LEN - 1 && byte > 1 {
                return None;
            }
            return Some((value, i + 1));
        }
    }
    // A varint must not be longer than 10 bytes.
    None
}
/// Slow-path decoding for varints that may cross `Buf` chunk boundaries.
/// Reads one byte at a time through `Buf`, so chunk seams are transparent.
#[cold]
#[inline(never)]
fn decode_varint64_slow(buf: &mut impl Buf) -> Option<u64> {
    // Safety: The dispatcher `decode_varint` only calls this function if `bytes[0] >= 0x80`.
    // This hint allows the compiler to optimize the first loop iteration.
    unsafe { assume(buf.chunk().len() > 0 && buf.chunk()[0] >= 0x80) };
    let mut value = 0u64;
    for i in 0..VARINT64_MAX_LEN {
        if unlikely(!buf.has_remaining()) {
            return None; // Unexpected end of buffer.
        }
        let byte = buf.get_u8();
        value |= ((byte & 0x7F) as u64) << (i * 7);
        if byte < 0x80 {
            // Check for overlong encoding on the 10th byte.
            if unlikely(i == 9 && byte > 1) {
                return None;
            }
            return Some(value);
        }
    }
    // A varint must not be longer than 10 bytes.
    None
}
/// The maximum number of bytes a Protobuf Varint can occupy.
/// (`ceil(32 / 7)` = 5 for a `u32` payload.)
const VARINT32_MAX_LEN: usize = 5;
/// Encodes an integer value into LEB128 variable length format, and writes it to the buffer.
///
/// Dispatches to a fast path if the buffer has enough contiguous space,
/// otherwise falls back to a slower, byte-by-byte write.
///
/// Returns the number of bytes written (1..=5).
#[inline]
pub fn encode_varint32(value: u32, buf: &mut impl BufMut) -> usize {
    let len = encoded_len_varint32(value);
    // If there is enough contiguous space, use the optimized path.
    if likely(buf.chunk_mut().len() >= len) {
        // Safety: The check above guarantees `buf.chunk_mut()` has at least `len` bytes.
        unsafe { encode_varint32_fast(value, len, buf) };
    } else {
        encode_varint32_slow(value, len, buf);
    }
    len
}
/// Fast-path for encoding to a contiguous buffer slice.
///
/// ## Safety
///
/// The caller must ensure `buf.chunk_mut().len() >= len`.
#[inline(always)]
unsafe fn encode_varint32_fast(mut value: u32, len: usize, buf: &mut impl BufMut) {
    // Raw-pointer writes into the (possibly uninitialized) chunk; every byte
    // in `..len` is written before `advance_mut` marks them as initialized.
    let ptr = buf.chunk_mut().as_mut_ptr();
    for i in 0..(len - 1) {
        *ptr.add(i) = (value & 0x7F) as u8 | 0x80;
        value >>= 7;
    }
    // After the loop, `value` holds the last byte, which must not have the continuation bit.
    // The `encoded_len_varint` logic guarantees this.
    assume(value < 0x80);
    *ptr.add(len - 1) = value as u8;
    // Notify the buffer that `len` bytes have been written.
    buf.advance_mut(len);
}
/// Slow-path encoding for buffers that may not be contiguous.
/// (`#[cold]` + `#[inline(never)]` keep it out of the hot path's codegen.)
#[cold]
#[inline(never)]
fn encode_varint32_slow(mut value: u32, len: usize, buf: &mut impl BufMut) {
    for _ in 0..(len - 1) {
        buf.put_u8((value & 0x7F) as u8 | 0x80);
        value >>= 7;
    }
    // After the loop, `value` holds the last byte, which must not have the continuation bit.
    // The `encoded_len_varint` logic guarantees this.
    unsafe { assume(value < 0x80) };
    buf.put_u8(value as u8);
}
/// Returns the encoded length of the value in LEB128 variable length format.
/// The returned value will be between 1 and 5, inclusive.
#[inline]
pub const fn encoded_len_varint32(value: u32) -> usize {
    // Number of significant bits (0 for value == 0).
    let bit_width = 32 - value.leading_zeros();
    // `(bit_width * 9) >> 6` computes `max(ceil(bit_width / 7) - 1, 0)`
    // exactly for 0..=32; the `+ 1` makes value == 0 take one byte. Same
    // arithmetic as the previous nightly `bit_width`/unchecked version, but
    // on stable and without `unsafe`.
    ((bit_width * 9) >> 6) as usize + 1
}
/// Decodes a LEB128-encoded variable length integer from the buffer.
///
/// Errors when the buffer ends mid-varint or the encoding is overlong
/// (more than five bytes / overflows a `u32`).
#[inline]
pub fn decode_varint32(buf: &mut impl Buf) -> Result<u32, DecodeError> {
    #[inline(always)]
    fn inner(buf: &mut impl Buf) -> Option<u32> {
        let bytes = buf.chunk();
        let len = bytes.len();
        if unlikely(len == 0) {
            return None;
        }
        // Fast path for single-byte varints.
        let first = unsafe { *bytes.get_unchecked(0) };
        if likely(first < 0x80) {
            buf.advance(1);
            return Some(first as _);
        }
        // If the chunk is large enough or the varint is known to terminate within it,
        // use the fast path which operates on a slice.
        if likely(len >= VARINT32_MAX_LEN || bytes[len - 1] < 0x80) {
            return decode_varint32_fast(bytes).map(|(value, advance)| {
                buf.advance(advance);
                value
            });
        }
        // Fallback for varints that cross chunk boundaries.
        decode_varint32_slow(buf)
    }
    // `ok_or_else` so the error (and its message string) is only built on
    // the failure path, not eagerly on every successful decode.
    inner(buf).ok_or_else(|| DecodeError::new("invalid varint32"))
}
/// Fast-path decoding of a varint from a contiguous memory slice.
///
/// Returns the decoded value and the number of bytes consumed, or `None` for a
/// varint longer than 5 bytes or one whose 5th byte overflows `u32`.
///
/// ## Safety
///
/// Assumes `bytes` contains a complete varint or is at least `VARINT32_MAX_LEN` bytes long.
#[inline(always)]
fn decode_varint32_fast(bytes: &[u8]) -> Option<(u32, usize)> {
    let ptr = bytes.as_ptr();
    let mut value = 0u32;
    for i in 0..VARINT32_MAX_LEN {
        // Safety: in-bounds by this function's contract (see above).
        let byte = unsafe { *ptr.add(i) };
        value |= ((byte & 0x7F) as u32) << (i * 7);
        if byte < 0x80 {
            // Check for overlong encoding on the 5th byte: it may only carry the
            // top 4 bits of a u32 (bits 28..=31), so any payload above 0x0F
            // overflows. Fix: this was `byte > 4`, which wrongly rejected valid
            // values >= 0x5000_0000 — e.g. `u32::MAX`, whose 5th byte is 0x0F
            // as produced by `encode_varint32`.
            if unlikely(i == 4 && byte > 0x0F) {
                return None;
            }
            return Some((value, i + 1));
        }
    }
    // A varint must not be longer than 5 bytes.
    None
}
/// Slow-path decoding for varints that may cross `Buf` chunk boundaries.
///
/// Reads byte-by-byte through `Buf::get_u8`, so it tolerates arbitrary chunk
/// layouts. Returns `None` on a truncated or overlong varint.
#[cold]
#[inline(never)]
fn decode_varint32_slow(buf: &mut impl Buf) -> Option<u32> {
    // Safety: the dispatcher `decode_varint32` only calls this function when the
    // current chunk is non-empty and its first byte has the continuation bit set
    // (the single-byte fast path already returned otherwise).
    // This hint allows the compiler to optimize the first loop iteration.
    unsafe { assume(buf.chunk().len() > 0 && buf.chunk()[0] >= 0x80) };
    let mut value = 0u32;
    for i in 0..VARINT32_MAX_LEN {
        if unlikely(!buf.has_remaining()) {
            return None; // Unexpected end of buffer.
        }
        let byte = buf.get_u8();
        value |= ((byte & 0x7F) as u32) << (i * 7);
        if byte < 0x80 {
            // Check for overlong encoding on the 5th byte: it may only carry
            // bits 28..=31 of a u32, so anything above 0x0F overflows.
            // Fix: this was `byte > 4`, which wrongly rejected valid values
            // such as `u32::MAX` (5th byte 0x0F).
            if unlikely(i == 4 && byte > 0x0F) {
                return None;
            }
            return Some(value);
        }
    }
    // A varint must not be longer than 5 bytes.
    None
}
/// Varint codec for `usize`, delegating to the codec matching the platform's
/// pointer width.
pub mod usize {
    use super::*;
    // The maximum encoded length follows the platform's pointer width.
    #[cfg(target_pointer_width = "32")]
    pub(super) use super::VARINT32_MAX_LEN as VARINT_MAX_LEN;
    #[cfg(target_pointer_width = "64")]
    pub(super) use super::VARINT64_MAX_LEN as VARINT_MAX_LEN;
    /// Encodes `value`, returning the number of bytes written.
    #[inline(always)]
    pub fn encode_varint(value: usize, buf: &mut impl BufMut) -> usize {
        #[cfg(target_pointer_width = "32")]
        return encode_varint32(value as u32, buf);
        #[cfg(target_pointer_width = "64")]
        return encode_varint64(value as u64, buf);
    }
    /// Returns the encoded length of `value` (1..=VARINT_MAX_LEN).
    #[inline(always)]
    pub const fn encoded_len_varint(value: usize) -> usize {
        #[cfg(target_pointer_width = "32")]
        return encoded_len_varint32(value as u32);
        #[cfg(target_pointer_width = "64")]
        return encoded_len_varint64(value as u64);
    }
    /// Decodes a `usize` from the buffer.
    #[inline(always)]
    pub fn decode_varint(buf: &mut impl Buf) -> Result<usize, DecodeError> {
        // NOTE(review): relies on `Result<u32/u64, DecodeError>` and
        // `Result<usize, DecodeError>` sharing a layout for the matching
        // pointer width; presumably `transmute_unchecked!` asserts the
        // sizes — confirm against the macro's definition.
        #[cfg(target_pointer_width = "32")]
        return transmute_unchecked!(decode_varint32(buf));
        #[cfg(target_pointer_width = "64")]
        return transmute_unchecked!(decode_varint64(buf));
    }
}
/// Varint codec for `bool` values (encoded as a single byte, `0` or `1`).
pub mod bool {
    use super::*;
    /// Encodes `value` as one byte, returning the number of bytes written (always 1).
    #[inline(always)]
    pub fn encode_varint(value: bool, buf: &mut impl BufMut) -> usize {
        buf.put_u8(value as _);
        1
    }
    /// A `bool` always encodes to exactly one byte.
    #[inline(always)]
    pub const fn encoded_len_varint(_value: bool) -> usize { 1 }
    /// Decodes a `bool`, rejecting any byte other than `0` or `1`.
    #[inline(always)]
    pub fn decode_varint(buf: &mut impl Buf) -> Result<bool, DecodeError> {
        fn inner(buf: &mut impl Buf) -> Option<bool> {
            if unlikely(buf.remaining() == 0) {
                return None;
            }
            let byte = buf.get_u8();
            if byte <= 1 { Some(byte != 0) } else { None }
        }
        // `ok_or_else`: only construct the boxed error on failure.
        inner(buf).ok_or_else(|| DecodeError::new("invalid bool"))
    }
    /// Appends `[length delimiter][packed values]` to `buf` in one pass.
    ///
    /// A `VARINT_MAX_LEN`-byte placeholder is reserved for the delimiter, the
    /// values are encoded after it, then the real delimiter is written and the
    /// payload shifted down over the placeholder's unused tail.
    #[inline]
    pub(in super::super) fn encode_packed_fast<B: ReservableBuf>(values: &[bool], buf: &mut B) {
        // Fix: track the write position as an *offset*, not a raw pointer. The
        // previous code captured `buf.as_mut().as_mut_ptr()` before `reserve`,
        // which dangles once `reserve` (or any value append below) reallocates,
        // and it also pointed at the start of the buffer rather than at the
        // append position, clobbering already-written data.
        let delim_start = buf.len();
        // Lower bound: the placeholder plus at least one byte per value.
        buf.reserve(usize::VARINT_MAX_LEN + values.len());
        // Safety: `reserve` guarantees the capacity; the placeholder bytes are
        // overwritten by the delimiter or truncated by the final `set_len`,
        // and are never read.
        unsafe {
            buf.set_len(delim_start + usize::VARINT_MAX_LEN);
        }
        let mut length = 0;
        for &value in values {
            length += encode_varint(value, buf);
        }
        // Re-derive the pointer only after all appends are done.
        let delim_ptr = unsafe { buf.as_mut().as_mut_ptr().add(delim_start) };
        let mut length_slice = unsafe {
            &mut *(delim_ptr as *mut [::core::mem::MaybeUninit<u8>; usize::VARINT_MAX_LEN])
                as &mut [::core::mem::MaybeUninit<u8>]
        };
        let len = usize::encode_varint(length, &mut length_slice);
        unsafe {
            // Shift the payload down over the placeholder's unused tail;
            // regions overlap, so `copy` (memmove), not `copy_nonoverlapping`.
            let dst = delim_ptr.add(len);
            let src = delim_ptr.add(usize::VARINT_MAX_LEN);
            ::core::ptr::copy(src, dst, length);
            buf.set_len(
                buf.len()
                    .unchecked_sub(usize::VARINT_MAX_LEN)
                    .unchecked_add(len),
            );
        }
    }
}
macro_rules! varint {
($ty:ty, $proto_ty:ident,32) => {
pub mod $proto_ty {
use super::*;
#[inline(always)]
pub fn encode_varint(value: $ty, buf: &mut impl BufMut) -> usize { encode_varint32(value as u32, buf) }
#[inline(always)]
pub const fn encoded_len_varint(value: $ty) -> usize { encoded_len_varint32(value as u32) }
#[inline(always)]
pub fn decode_varint(buf: &mut impl Buf) -> Result<$ty, DecodeError> {
transmute_unchecked!(decode_varint32(buf))
}
#[inline]
pub(in super::super) fn encode_packed_fast(values: &[$ty], buf: &mut impl ReservableBuf) {
let start_ptr = buf.as_mut().as_mut_ptr();
buf.reserve(usize::VARINT_MAX_LEN);
unsafe {
buf.set_len(buf.len() + usize::VARINT_MAX_LEN);
}
let mut length = 0;
for &value in values {
length += encode_varint(value, buf);
}
let mut length_slice = unsafe {
&mut *(start_ptr as *mut [::core::mem::MaybeUninit<u8>; usize::VARINT_MAX_LEN])
as &mut [::core::mem::MaybeUninit<u8>]
};
let len = usize::encode_varint(length, &mut length_slice);
unsafe {
let dst = start_ptr.add(len);
let src = start_ptr.add(usize::VARINT_MAX_LEN);
::core::ptr::copy(src, dst, length);
buf.set_len(
buf.len()
.unchecked_sub(usize::VARINT_MAX_LEN)
.unchecked_add(len),
);
}
}
}
};
($ty:ty, $proto_ty:ident,64) => {
pub mod $proto_ty {
use super::*;
#[inline(always)]
pub fn encode_varint(value: $ty, buf: &mut impl BufMut) -> usize { encode_varint64(value as u64, buf) }
#[inline(always)]
pub const fn encoded_len_varint(value: $ty) -> usize { encoded_len_varint64(value as u64) }
#[inline(always)]
pub fn decode_varint(buf: &mut impl Buf) -> Result<$ty, DecodeError> {
transmute_unchecked!(decode_varint64(buf))
}
#[inline]
pub(in super::super) fn encode_packed_fast(values: &[$ty], buf: &mut impl ReservableBuf) {
let start_ptr = buf.as_mut().as_mut_ptr();
buf.reserve(usize::VARINT_MAX_LEN);
unsafe {
buf.set_len(buf.len() + usize::VARINT_MAX_LEN);
}
let mut length = 0;
for &value in values {
length += encode_varint(value, buf);
}
let mut length_slice = unsafe {
&mut *(start_ptr as *mut [::core::mem::MaybeUninit<u8>; usize::VARINT_MAX_LEN])
as &mut [::core::mem::MaybeUninit<u8>]
};
let len = usize::encode_varint(length, &mut length_slice);
unsafe {
let dst = start_ptr.add(len);
let src = start_ptr.add(usize::VARINT_MAX_LEN);
::core::ptr::copy(src, dst, length);
buf.set_len(
buf.len()
.unchecked_sub(usize::VARINT_MAX_LEN)
.unchecked_add(len),
);
}
}
}
};
($ty:ty, $proto_ty:ident,32, $encode_fn:ident, $decode_fn:ident) => {
pub mod $proto_ty {
use super::*;
#[inline(always)]
pub fn encode_varint(value: $ty, buf: &mut impl BufMut) -> usize {
encode_varint32($encode_fn(value), buf)
}
#[inline(always)]
pub const fn encoded_len_varint(value: $ty) -> usize { encoded_len_varint32($encode_fn(value)) }
#[inline(always)]
pub fn decode_varint(buf: &mut impl Buf) -> Result<$ty, DecodeError> {
decode_varint32(buf).map($decode_fn)
}
#[inline]
pub(in super::super) fn encode_packed_fast(values: &[$ty], buf: &mut impl ReservableBuf) {
let start_ptr = buf.as_mut().as_mut_ptr();
buf.reserve(usize::VARINT_MAX_LEN);
unsafe {
buf.set_len(buf.len() + usize::VARINT_MAX_LEN);
}
let mut length = 0;
for &value in values {
length += encode_varint(value, buf);
}
let mut length_slice = unsafe {
&mut *(start_ptr as *mut [::core::mem::MaybeUninit<u8>; usize::VARINT_MAX_LEN])
as &mut [::core::mem::MaybeUninit<u8>]
};
let len = usize::encode_varint(length, &mut length_slice);
unsafe {
let dst = start_ptr.add(len);
let src = start_ptr.add(usize::VARINT_MAX_LEN);
::core::ptr::copy(src, dst, length);
buf.set_len(
buf.len()
.unchecked_sub(usize::VARINT_MAX_LEN)
.unchecked_add(len),
);
}
}
}
};
($ty:ty, $proto_ty:ident,64, $encode_fn:ident, $decode_fn:ident) => {
pub mod $proto_ty {
use super::*;
#[inline(always)]
pub fn encode_varint(value: $ty, buf: &mut impl BufMut) -> usize {
encode_varint64($encode_fn(value), buf)
}
#[inline(always)]
pub const fn encoded_len_varint(value: $ty) -> usize { encoded_len_varint64($encode_fn(value)) }
#[inline(always)]
pub fn decode_varint(buf: &mut impl Buf) -> Result<$ty, DecodeError> {
decode_varint64(buf).map($decode_fn)
}
#[inline]
pub(in super::super) fn encode_packed_fast(values: &[$ty], buf: &mut impl ReservableBuf) {
let start_ptr = buf.as_mut().as_mut_ptr();
buf.reserve(usize::VARINT_MAX_LEN);
unsafe {
buf.set_len(buf.len() + usize::VARINT_MAX_LEN);
}
let mut length = 0;
for &value in values {
length += encode_varint(value, buf);
}
let mut length_slice = unsafe {
&mut *(start_ptr as *mut [::core::mem::MaybeUninit<u8>; usize::VARINT_MAX_LEN])
as &mut [::core::mem::MaybeUninit<u8>]
};
let len = usize::encode_varint(length, &mut length_slice);
unsafe {
let dst = start_ptr.add(len);
let src = start_ptr.add(usize::VARINT_MAX_LEN);
::core::ptr::copy(src, dst, length);
buf.set_len(
buf.len()
.unchecked_sub(usize::VARINT_MAX_LEN)
.unchecked_add(len),
);
}
}
}
};
}
// Instantiate the varint codec modules for each protobuf scalar type.
varint!(i32, int32, 32);
varint!(i64, int64, 64);
varint!(u32, uint32, 32);
varint!(u64, uint64, 64);
// sint32/sint64 apply a zigzag transform around the plain varint codec.
varint!(i32, sint32, 32, encode_zigzag32, decode_zigzag32);
varint!(i64, sint64, 64, encode_zigzag64, decode_zigzag64);
/// A growable, contiguous write buffer exposing length and capacity
/// management; used by the packed fast-path encoders.
pub(super) trait ReservableBuf: Sized + BufMut + AsMut<[u8]> {
    /// Ensures capacity for at least `additional` more bytes.
    fn reserve(&mut self, additional: usize);
    /// Current initialized length in bytes.
    fn len(&self) -> usize;
    /// Sets the initialized length.
    ///
    /// # Safety
    ///
    /// `len` must not exceed the capacity, and all bytes up to `len` must be
    /// initialized before they are read.
    unsafe fn set_len(&mut self, len: usize);
}
/// `ReservableBuf` backed by `bytes::BytesMut`. The inherent methods are named
/// explicitly via `Self::` so they are called unambiguously (not the trait methods).
impl ReservableBuf for ::bytes::BytesMut {
    #[inline(always)]
    fn reserve(&mut self, additional: usize) { Self::reserve(self, additional); }
    #[inline(always)]
    fn len(&self) -> usize { Self::len(self) }
    #[inline(always)]
    unsafe fn set_len(&mut self, len: usize) { Self::set_len(self, len); }
}
/// `ReservableBuf` backed by `Vec<u8>`. The inherent methods are named
/// explicitly via `Self::` so they are called unambiguously (not the trait methods).
impl ReservableBuf for ::alloc::vec::Vec<u8> {
    #[inline(always)]
    fn reserve(&mut self, additional: usize) { Self::reserve(self, additional); }
    #[inline(always)]
    fn len(&self) -> usize { Self::len(self) }
    #[inline(always)]
    unsafe fn set_len(&mut self, len: usize) { Self::set_len(self, len); }
}

View File

@@ -0,0 +1,70 @@
use alloc::format;
use crate::DecodeError;
/// Represents the wire type for protobuf encoding.
///
/// The integer value is equivalent to the encoded value.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum WireType {
    /// Variable-length integer.
    Varint = 0,
    /// Fixed 8-byte value (`fixed64`, `sfixed64`, `double`).
    SixtyFourBit = 1,
    /// Length-prefixed payload (strings, bytes, messages, packed fields).
    LengthDelimited = 2,
    /// Start of a (deprecated) group.
    StartGroup = 3,
    /// End of a (deprecated) group.
    EndGroup = 4,
    /// Fixed 4-byte value (`fixed32`, `sfixed32`, `float`).
    ThirtyTwoBit = 5,
}
impl WireType {
    /// Converts a raw wire-type value (`0..=5`) into a `WireType`.
    #[inline]
    const fn try_from(value: u8) -> Option<Self> {
        Some(match value {
            0 => WireType::Varint,
            1 => WireType::SixtyFourBit,
            2 => WireType::LengthDelimited,
            3 => WireType::StartGroup,
            4 => WireType::EndGroup,
            5 => WireType::ThirtyTwoBit,
            _ => return None,
        })
    }
    /// Splits a decoded key into its wire type and remaining field-number bits.
    #[inline]
    pub fn try_from_tag(tag: u32) -> Result<(Self, u32), DecodeError> {
        let value = (tag & super::WireTypeMask) as u8;
        let Some(wire_type) = Self::try_from(value) else {
            return Err(DecodeError::new(format!("invalid wire type value: {value}")));
        };
        Ok((wire_type, tag >> super::WireTypeBits))
    }
}
impl TryFrom<u32> for WireType {
    type Error = DecodeError;
    /// Fallible conversion from a raw `u32` wire-type value.
    #[inline]
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        let wire_type = match value {
            0 => WireType::Varint,
            1 => WireType::SixtyFourBit,
            2 => WireType::LengthDelimited,
            3 => WireType::StartGroup,
            4 => WireType::EndGroup,
            5 => WireType::ThirtyTwoBit,
            value => return Err(DecodeError::new(format!("invalid wire type value: {value}"))),
        };
        Ok(wire_type)
    }
}
/// Checks that the expected wire type matches the actual wire type,
/// or returns an error result.
#[inline]
pub fn check_wire_type(expected: WireType, actual: WireType) -> Result<(), DecodeError> {
    if expected == actual {
        Ok(())
    } else {
        Err(DecodeError::new(format!(
            "invalid wire type: {actual:?} (expected {expected:?})",
        )))
    }
}

View File

@@ -0,0 +1,180 @@
//! Protobuf encoding and decoding errors.
use alloc::borrow::Cow;
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::fmt;
/// A Protobuf message decoding error.
///
/// `DecodeError` indicates that the input buffer does not contain a valid
/// Protobuf message. The error details should be considered 'best effort': in
/// general it is not possible to exactly pinpoint why data is malformed.
#[derive(Clone, PartialEq, Eq)]
pub struct DecodeError {
    // Boxed so `DecodeError` (and thus `Result<T, DecodeError>`) stays one
    // pointer wide on the success path.
    inner: Box<Inner>,
}
#[derive(Clone, PartialEq, Eq)]
struct Inner {
    /// A 'best effort' root cause description.
    description: Cow<'static, str>,
    /// A stack of (message, field) name pairs, which identify the specific
    /// message type and field where decoding failed. The stack contains an
    /// entry per level of nesting.
    stack: Vec<(&'static str, &'static str)>,
}
impl DecodeError {
    /// Creates a new `DecodeError` with a 'best effort' root cause description.
    ///
    /// Meant to be used only by `Message` implementations.
    #[doc(hidden)]
    #[cold]
    pub fn new(description: impl Into<Cow<'static, str>>) -> DecodeError {
        let inner = Inner {
            description: description.into(),
            stack: Vec::new(),
        };
        DecodeError {
            inner: Box::new(inner),
        }
    }
    /// Pushes a (message, field) name location pair on to the location stack.
    ///
    /// Meant to be used only by `Message` implementations.
    #[doc(hidden)]
    pub fn push(&mut self, message: &'static str, field: &'static str) {
        self.inner.stack.push((message, field));
    }
}
impl fmt::Debug for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DecodeError")
.field("description", &self.inner.description)
.field("stack", &self.inner.stack)
.finish()
}
}
impl fmt::Display for DecodeError {
    /// Renders as `failed to decode Protobuf message: <locations in push order> <description>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "failed to decode Protobuf message: ")?;
        for (message, field) in self.inner.stack.iter().copied() {
            write!(f, "{message}.{field}: ")?;
        }
        write!(f, "{}", self.inner.description)
    }
}
#[cfg(feature = "std")]
impl std::error::Error for DecodeError {}
#[cfg(feature = "std")]
impl From<DecodeError> for std::io::Error {
    // Decoding failures map to `InvalidData`: the bytes were read, but do not
    // form a valid message.
    fn from(error: DecodeError) -> std::io::Error {
        std::io::Error::new(std::io::ErrorKind::InvalidData, error)
    }
}
/// A Protobuf message encoding error.
///
/// `EncodeError` always indicates that a message failed to encode because the
/// provided buffer had insufficient capacity. Message encoding is otherwise
/// infallible.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct EncodeError {
    /// Bytes needed to encode the whole message.
    required: usize,
    /// Bytes the destination buffer could still accept.
    remaining: usize,
}
impl EncodeError {
/// Creates a new `EncodeError`.
pub(crate) fn new(required: usize, remaining: usize) -> EncodeError {
EncodeError {
required,
remaining,
}
}
/// Returns the required buffer capacity to encode the message.
pub fn required_capacity(&self) -> usize {
self.required
}
/// Returns the remaining length in the provided buffer at the time of encoding.
pub fn remaining(&self) -> usize {
self.remaining
}
}
impl fmt::Display for EncodeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"failed to encode Protobuf message; insufficient buffer capacity (required: {}, remaining: {})",
self.required, self.remaining
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for EncodeError {}
#[cfg(feature = "std")]
impl From<EncodeError> for std::io::Error {
    // Insufficient capacity is a problem with the caller-supplied buffer,
    // hence `InvalidInput`.
    fn from(error: EncodeError) -> std::io::Error {
        std::io::Error::new(std::io::ErrorKind::InvalidInput, error)
    }
}
/// An error indicating that an unknown enumeration value was encountered.
///
/// The Protobuf spec mandates that enumeration value sets are open, so this
/// error's value represents an integer value unrecognized by the
/// presently used enum definition.
///
/// The wrapped `i32` is the unrecognized raw value.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct UnknownEnumValue(pub i32);
impl fmt::Display for UnknownEnumValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "unknown enumeration value {}", self.0)
}
}
// `UnknownEnumValue` carries no source error, so the default `Error` impl suffices.
#[cfg(feature = "std")]
impl std::error::Error for UnknownEnumValue {}
#[cfg(test)]
mod test {
    use super::*;
    // Location pairs render in push order, before the root description.
    #[test]
    fn test_push() {
        let mut decode_error = DecodeError::new("something failed");
        decode_error.push("Foo bad", "bar.foo");
        decode_error.push("Baz bad", "bar.baz");
        assert_eq!(
            decode_error.to_string(),
            "failed to decode Protobuf message: Foo bad.bar.foo: Baz bad.bar.baz: something failed"
        );
    }
    // `DecodeError` converts into an `io::Error` of kind `InvalidData` and
    // keeps its display text.
    #[cfg(feature = "std")]
    #[test]
    fn test_into_std_io_error() {
        let decode_error = DecodeError::new("something failed");
        let std_io_error = std::io::Error::from(decode_error);
        assert_eq!(std_io_error.kind(), std::io::ErrorKind::InvalidData);
        assert_eq!(
            std_io_error.to_string(),
            "failed to decode Protobuf message: something failed"
        );
    }
}

View File

@@ -0,0 +1,54 @@
#![allow(internal_features, unsafe_op_in_unsafe_fn)]
#![feature(core_intrinsics, uint_bit_width, portable_simd, pattern, char_internals)]
#![doc(html_root_url = "https://docs.rs/prost/0.14.1")]
#![cfg_attr(not(feature = "std"), no_std)]
#![doc = include_str!("../README.md")]
// Re-export the alloc crate for use within derived code.
#[doc(hidden)]
pub extern crate alloc;
// Re-export the bytes crate for use within derived code.
pub use bytes;
// Re-export the indexmap crate for use within derived code.
#[cfg(feature = "indexmap")]
#[doc(hidden)]
pub use indexmap;
mod error;
mod message;
// mod name;
mod types;
mod byte_str;
#[doc(hidden)]
pub mod encoding;
pub use crate::encoding::length_delimiter::{
decode_length_delimiter, encode_length_delimiter, length_delimiter_len,
};
pub use crate::error::{DecodeError, EncodeError, UnknownEnumValue};
pub use crate::message::Message;
// pub use crate::name::Name;
pub use crate::byte_str::ByteStr;
// See `encoding::DecodeContext` for more info.
// 100 is the default recursion limit in the C++ implementation.
#[cfg(not(feature = "no-recursion-limit"))]
const RECURSION_LIMIT: u32 = 100;
// Re-export #[derive(Message, Enumeration, Oneof)].
// Based on serde's equivalent re-export [1], but enabled by default.
//
// [1]: https://github.com/serde-rs/serde/blob/v1.0.89/serde/src/lib.rs#L245-L256
#[cfg(feature = "derive")]
#[allow(unused_imports)]
#[macro_use]
extern crate prost_derive;
#[cfg(feature = "derive")]
#[doc(hidden)]
pub use prost_derive::*;
#[macro_use]
extern crate macros;

View File

@@ -0,0 +1,184 @@
use core::num::NonZeroU32;
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use bytes::{Buf, BufMut};
use crate::{
DecodeError, EncodeError,
encoding::{
DecodeContext, decode_tag, message,
varint::usize::{encode_varint, encoded_len_varint},
wire_type::WireType,
},
};
/// A Protocol Buffers message.
///
/// The `where Self: Sized` bounds on the generic methods keep the trait usable
/// as a trait object (see the object-safety check in the tests module).
pub trait Message: Send + Sync {
    /// Encodes the message to a buffer.
    ///
    /// This method will panic if the buffer has insufficient capacity.
    ///
    /// Meant to be used only by `Message` implementations.
    #[doc(hidden)]
    fn encode_raw(&self, buf: &mut impl BufMut)
    where
        Self: Sized;
    /// Decodes a field from a buffer, and merges it into `self`.
    ///
    /// Meant to be used only by `Message` implementations.
    #[doc(hidden)]
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError>
    where
        Self: Sized;
    /// Returns the encoded length of the message without a length delimiter.
    fn encoded_len(&self) -> usize;
    /// Encodes the message to a buffer.
    ///
    /// An error will be returned if the buffer does not have sufficient capacity.
    fn encode(&self, buf: &mut impl BufMut) -> Result<(), EncodeError>
    where
        Self: Sized,
    {
        // Capacity is checked up front so `encode_raw` cannot panic below.
        let required = self.encoded_len();
        let remaining = buf.remaining_mut();
        if required > remaining {
            return Err(EncodeError::new(required, remaining));
        }
        self.encode_raw(buf);
        Ok(())
    }
    /// Encodes the message to a newly allocated buffer.
    fn encode_to_vec(&self) -> Vec<u8>
    where
        Self: Sized,
    {
        // Pre-sizing from `encoded_len` avoids reallocation during encoding.
        let mut buf = Vec::with_capacity(self.encoded_len());
        self.encode_raw(&mut buf);
        buf
    }
    /// Encodes the message with a length-delimiter to a buffer.
    ///
    /// An error will be returned if the buffer does not have sufficient capacity.
    fn encode_length_delimited(&self, buf: &mut impl BufMut) -> Result<(), EncodeError>
    where
        Self: Sized,
    {
        // Required space covers the varint length prefix plus the payload.
        let len = self.encoded_len();
        let required = len + encoded_len_varint(len);
        let remaining = buf.remaining_mut();
        if required > remaining {
            return Err(EncodeError::new(required, remaining));
        }
        encode_varint(len, buf);
        self.encode_raw(buf);
        Ok(())
    }
    /// Encodes the message with a length-delimiter to a newly allocated buffer.
    fn encode_length_delimited_to_vec(&self) -> Vec<u8>
    where
        Self: Sized,
    {
        let len = self.encoded_len();
        let mut buf = Vec::with_capacity(len + encoded_len_varint(len));
        encode_varint(len, &mut buf);
        self.encode_raw(&mut buf);
        buf
    }
    /// Decodes an instance of the message from a buffer.
    ///
    /// The entire buffer will be consumed.
    fn decode(mut buf: impl Buf) -> Result<Self, DecodeError>
    where
        Self: Default,
    {
        let mut message = Self::default();
        Self::merge(&mut message, &mut buf).map(|_| message)
    }
    /// Decodes a length-delimited instance of the message from the buffer.
    fn decode_length_delimited(buf: impl Buf) -> Result<Self, DecodeError>
    where
        Self: Default,
    {
        let mut message = Self::default();
        message.merge_length_delimited(buf)?;
        Ok(message)
    }
    /// Decodes an instance of the message from a buffer, and merges it into `self`.
    ///
    /// The entire buffer will be consumed.
    fn merge(&mut self, mut buf: impl Buf) -> Result<(), DecodeError>
    where
        Self: Sized,
    {
        let ctx = DecodeContext::default();
        // One key/value pair per iteration until the buffer is exhausted;
        // a fresh clone of `ctx` is handed to each field.
        while buf.has_remaining() {
            let (number, wire_type) = decode_tag(&mut buf)?;
            self.merge_field(number, wire_type, &mut buf, ctx.clone())?;
        }
        Ok(())
    }
    /// Decodes a length-delimited instance of the message from buffer, and
    /// merges it into `self`.
    fn merge_length_delimited(&mut self, mut buf: impl Buf) -> Result<(), DecodeError>
    where
        Self: Sized,
    {
        message::merge(
            WireType::LengthDelimited,
            self,
            &mut buf,
            DecodeContext::default(),
        )
    }
    /// Clears the message, resetting all fields to their default.
    fn clear(&mut self);
}
/// `Box<M>` is a message whenever `M` is: every method forwards to the boxed value.
impl<M> Message for Box<M>
where
    M: Message,
{
    fn encode_raw(&self, buf: &mut impl BufMut) {
        let inner: &M = self;
        inner.encode_raw(buf)
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        let inner: &mut M = self;
        inner.merge_field(number, wire_type, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        let inner: &M = self;
        inner.encoded_len()
    }
    fn clear(&mut self) {
        let inner: &mut M = self;
        inner.clear()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Compile-time check that `Message` remains usable as a trait object.
    const _MESSAGE_IS_OBJECT_SAFE: Option<&dyn Message> = None;
}

View File

@@ -0,0 +1,34 @@
//! Support for associating type name information with a [`Message`].
use crate::Message;
#[cfg(not(feature = "std"))]
use alloc::{format, string::String};
/// Associate a type name with a [`Message`] type.
pub trait Name: Message {
    /// Simple name for this [`Message`].
    /// This name is the same as it appears in the source .proto file, e.g. `FooBar`.
    const NAME: &'static str;
    /// Package name this message type is contained in. They are domain-like
    /// and delimited by `.`, e.g. `google.protobuf`.
    const PACKAGE: &'static str;
    /// Fully-qualified unique name for this [`Message`].
    /// It's prefixed with the package name and names of any parent messages,
    /// e.g. `google.rpc.BadRequest.FieldViolation`.
    /// By default, this is the package name followed by the message name.
    /// Fully-qualified names must be unique within a domain of Type URLs.
    ///
    /// E.g. `PACKAGE = "google.protobuf"`, `NAME = "Empty"` yields
    /// `google.protobuf.Empty`.
    fn full_name() -> String {
        format!("{}.{}", Self::PACKAGE, Self::NAME)
    }
    /// Type URL for this [`Message`], which by default is the full name with a
    /// leading slash, but may also include a leading domain name, e.g.
    /// `type.googleapis.com/google.profile.Person`.
    /// This can be used when serializing into the `google.protobuf.Any` type.
    fn type_url() -> String {
        format!("/{}", Self::full_name())
    }
}

View File

@@ -0,0 +1,573 @@
//! Protocol Buffers well-known wrapper types.
//!
//! This module provides implementations of `Message` for Rust standard library types which
//! correspond to a Protobuf well-known wrapper type. The remaining well-known types are defined in
//! the `prost-types` crate in order to avoid a cyclic dependency between `prost` and
//! `prost-build`.
use core::num::NonZeroU32;
// use alloc::format;
use alloc::string::String;
use alloc::vec::Vec;
use ::bytes::{Buf, BufMut, Bytes};
use crate::encoding::wire_type::WireType;
use crate::encoding::FieldNumber1;
use crate::{
encoding::{
bool, bytes, double, float, int32, int64, skip_field, string, uint32, uint64, DecodeContext,
},
DecodeError, Message,
};
/// `google.protobuf.BoolValue`
impl Message for bool {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (`false`) is omitted from the wire.
        if *self {
            bool::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        bool::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        // One-byte key (field 1, varint) plus a one-byte value when `true`.
        match *self {
            true => 2,
            false => 0,
        }
    }
    fn clear(&mut self) {
        *self = false;
    }
}
// /// `google.protobuf.BoolValue`
// impl Name for bool {
// const NAME: &'static str = "BoolValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.UInt32Value`
impl Message for u32 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0) is omitted from the wire.
        if *self != 0 {
            uint32::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        uint32::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        match *self {
            0 => 0,
            _ => uint32::encoded_len(FieldNumber1, self),
        }
    }
    fn clear(&mut self) {
        *self = 0;
    }
}
// /// `google.protobuf.UInt32Value`
// impl Name for u32 {
// const NAME: &'static str = "UInt32Value";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.UInt64Value`
impl Message for u64 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0) is omitted from the wire.
        if *self != 0 {
            uint64::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        uint64::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        match *self {
            0 => 0,
            _ => uint64::encoded_len(FieldNumber1, self),
        }
    }
    fn clear(&mut self) {
        *self = 0;
    }
}
// /// `google.protobuf.UInt64Value`
// impl Name for u64 {
// const NAME: &'static str = "UInt64Value";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.Int32Value`
impl Message for i32 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0) is omitted from the wire.
        if *self != 0 {
            int32::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        int32::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        match *self {
            0 => 0,
            _ => int32::encoded_len(FieldNumber1, self),
        }
    }
    fn clear(&mut self) {
        *self = 0;
    }
}
// /// `google.protobuf.Int32Value`
// impl Name for i32 {
// const NAME: &'static str = "Int32Value";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.Int64Value`
impl Message for i64 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0) is omitted from the wire.
        if *self != 0 {
            int64::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        int64::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        match *self {
            0 => 0,
            _ => int64::encoded_len(FieldNumber1, self),
        }
    }
    fn clear(&mut self) {
        *self = 0;
    }
}
// /// `google.protobuf.Int64Value`
// impl Name for i64 {
// const NAME: &'static str = "Int64Value";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.FloatValue`
impl Message for f32 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0.0) is omitted from the wire.
        // NOTE(review): `-0.0 == 0.0`, so negative zero is also skipped.
        if *self != 0.0 {
            float::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        float::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        if *self == 0.0 {
            0
        } else {
            float::encoded_len(FieldNumber1, self)
        }
    }
    fn clear(&mut self) {
        *self = 0.0;
    }
}
// /// `google.protobuf.FloatValue`
// impl Name for f32 {
// const NAME: &'static str = "FloatValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.DoubleValue`
impl Message for f64 {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default value (0.0) is omitted from the wire.
        // NOTE(review): `-0.0 == 0.0`, so negative zero is also skipped.
        if *self != 0.0 {
            double::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        double::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        if *self == 0.0 {
            0
        } else {
            double::encoded_len(FieldNumber1, self)
        }
    }
    fn clear(&mut self) {
        *self = 0.0;
    }
}
// /// `google.protobuf.DoubleValue`
// impl Name for f64 {
// const NAME: &'static str = "DoubleValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.StringValue`
impl Message for String {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default (empty) value is omitted from the wire.
        if !self.is_empty() {
            string::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        string::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        if self.is_empty() {
            0
        } else {
            string::encoded_len(FieldNumber1, self)
        }
    }
    fn clear(&mut self) {
        // Explicit path: the inherent `String::clear`, not `Message::clear`.
        String::clear(self);
    }
}
// /// `google.protobuf.StringValue`
// impl Name for String {
// const NAME: &'static str = "StringValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.BytesValue`
impl Message for Vec<u8> {
    fn encode_raw(&self, buf: &mut impl BufMut) {
        // proto3 semantics: the default (empty) value is omitted from the wire.
        if !self.is_empty() {
            bytes::encode(FieldNumber1, self, buf)
        }
    }
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        if number != FieldNumber1 {
            // Unknown field: skip it for forward compatibility.
            return skip_field(wire_type, number, buf, ctx);
        }
        bytes::merge(wire_type, self, buf, ctx)
    }
    fn encoded_len(&self) -> usize {
        if self.is_empty() {
            0
        } else {
            bytes::encoded_len(FieldNumber1, self)
        }
    }
    fn clear(&mut self) {
        // Explicit path: the inherent `Vec::clear`, not `Message::clear`.
        Vec::clear(self);
    }
}
// /// `google.protobuf.BytesValue`
// impl Name for Vec<u8> {
// const NAME: &'static str = "BytesValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.BytesValue`
impl Message for Bytes {
fn encode_raw(&self, buf: &mut impl BufMut) {
if !self.is_empty() {
bytes::encode(FieldNumber1, self, buf)
}
}
fn merge_field(
&mut self,
number: NonZeroU32,
wire_type: WireType,
buf: &mut impl Buf,
ctx: DecodeContext,
) -> Result<(), DecodeError> {
if number == FieldNumber1 {
bytes::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, number, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if !self.is_empty() {
bytes::encoded_len(FieldNumber1, self)
} else {
0
}
}
fn clear(&mut self) {
self.clear();
}
}
// /// `google.protobuf.BytesValue`
// impl Name for Bytes {
// const NAME: &'static str = "BytesValue";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
/// `google.protobuf.Empty`
impl Message for () {
    // Empty carries no fields, so there is nothing to write.
    fn encode_raw(&self, _buf: &mut impl BufMut) {}
    fn merge_field(
        &mut self,
        number: NonZeroU32,
        wire_type: WireType,
        buf: &mut impl Buf,
        ctx: DecodeContext,
    ) -> Result<(), DecodeError> {
        // Every field number is unknown to Empty; skip it to remain
        // forward-compatible with unexpected input.
        skip_field(wire_type, number, buf, ctx)
    }
    // An Empty message always encodes to zero bytes.
    fn encoded_len(&self) -> usize {
        0
    }
    fn clear(&mut self) {}
}
// /// `google.protobuf.Empty`
// impl Name for () {
// const NAME: &'static str = "Empty";
// const PACKAGE: &'static str = "google.protobuf";
// fn type_url() -> String {
// googleapis_type_url_for::<Self>()
// }
// }
// /// Compute the type URL for the given `google.protobuf` type, using `type.googleapis.com` as the
// /// authority for the URL.
// fn googleapis_type_url_for<T: Name>() -> String {
// format!("type.googleapis.com/{}.{}", T::PACKAGE, T::NAME)
// }
// #[cfg(test)]
// mod tests {
// use super::*;
// #[test]
// fn test_impl_name() {
// assert_eq!("BoolValue", bool::NAME);
// assert_eq!("google.protobuf", bool::PACKAGE);
// assert_eq!("google.protobuf.BoolValue", bool::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.BoolValue",
// bool::type_url()
// );
// assert_eq!("UInt32Value", u32::NAME);
// assert_eq!("google.protobuf", u32::PACKAGE);
// assert_eq!("google.protobuf.UInt32Value", u32::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.UInt32Value",
// u32::type_url()
// );
// assert_eq!("UInt64Value", u64::NAME);
// assert_eq!("google.protobuf", u64::PACKAGE);
// assert_eq!("google.protobuf.UInt64Value", u64::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.UInt64Value",
// u64::type_url()
// );
// assert_eq!("Int32Value", i32::NAME);
// assert_eq!("google.protobuf", i32::PACKAGE);
// assert_eq!("google.protobuf.Int32Value", i32::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.Int32Value",
// i32::type_url()
// );
// assert_eq!("Int64Value", i64::NAME);
// assert_eq!("google.protobuf", i64::PACKAGE);
// assert_eq!("google.protobuf.Int64Value", i64::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.Int64Value",
// i64::type_url()
// );
// assert_eq!("FloatValue", f32::NAME);
// assert_eq!("google.protobuf", f32::PACKAGE);
// assert_eq!("google.protobuf.FloatValue", f32::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.FloatValue",
// f32::type_url()
// );
// assert_eq!("DoubleValue", f64::NAME);
// assert_eq!("google.protobuf", f64::PACKAGE);
// assert_eq!("google.protobuf.DoubleValue", f64::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.DoubleValue",
// f64::type_url()
// );
// assert_eq!("StringValue", String::NAME);
// assert_eq!("google.protobuf", String::PACKAGE);
// assert_eq!("google.protobuf.StringValue", String::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.StringValue",
// String::type_url()
// );
// assert_eq!("BytesValue", Vec::<u8>::NAME);
// assert_eq!("google.protobuf", Vec::<u8>::PACKAGE);
// assert_eq!("google.protobuf.BytesValue", Vec::<u8>::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.BytesValue",
// Vec::<u8>::type_url()
// );
// assert_eq!("BytesValue", Bytes::NAME);
// assert_eq!("google.protobuf", Bytes::PACKAGE);
// assert_eq!("google.protobuf.BytesValue", Bytes::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.BytesValue",
// Bytes::type_url()
// );
// assert_eq!("Empty", <()>::NAME);
// assert_eq!("google.protobuf", <()>::PACKAGE);
// assert_eq!("google.protobuf.Empty", <()>::full_name());
// assert_eq!(
// "type.googleapis.com/google.protobuf.Empty",
// <()>::type_url()
// );
// }
// }

View File

@@ -0,0 +1,25 @@
[package]
name = "prost-derive"
readme = "README.md"
description = "Generate encoding and decoding implementations for Prost annotated types."
version = "0.14.1"
authors = [
"Dan Burkert <dan@danburkert.com>",
"Lucio Franco <luciofranco14@gmail.com>",
"Casper Meijn <casper@meijn.net>",
"Tokio Contributors <team@tokio.rs>",
]
license = "Apache-2.0"
repository = "https://github.com/tokio-rs/prost"
edition = "2021"
rust-version = "1.71.1"
[lib]
proc-macro = true
[dependencies]
anyhow = "1.0.1"
itertools = ">=0.10.1, <=0.14"
proc-macro2 = "1.0"
quote = "1"
syn = { version = "2", features = ["extra-traits"] }

201
patch/prost-derive/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,16 @@
[![Documentation](https://docs.rs/prost-derive/badge.svg)](https://docs.rs/prost-derive/)
[![Crate](https://img.shields.io/crates/v/prost-derive.svg)](https://crates.io/crates/prost-derive)
# prost-derive
`prost-derive` handles generating encoding and decoding implementations for Rust
types annotated with `prost` annotation. For the most part, users of `prost`
shouldn't need to interact with `prost-derive` directly.
## License
`prost-derive` is distributed under the terms of the Apache License (Version 2.0).
See [LICENSE](../LICENSE) for details.
Copyright 2017 Dan Burkert

View File

@@ -0,0 +1,137 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{Meta, Path};
use crate::field::{set_bool, set_option, tag_attr, word_attr, Label};
/// A protobuf `group` field parsed from `#[prost(group, ...)]` attributes.
#[derive(Clone)]
pub struct Field {
    /// Field cardinality (optional / required / repeated).
    pub label: Label,
    /// Protobuf field number. Presumably always non-zero (proto field numbers
    /// start at 1) — the generated code relies on this; TODO confirm upstream
    /// validation.
    pub tag: u32,
}
impl Field {
pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let mut group = false;
let mut label = None;
let mut tag = None;
let mut boxed = false;
let mut unknown_attrs = Vec::new();
for attr in attrs {
if word_attr("group", attr) {
set_bool(&mut group, "duplicate group attributes")?;
} else if word_attr("boxed", attr) {
set_bool(&mut boxed, "duplicate boxed attributes")?;
} else if let Some(t) = tag_attr(attr)? {
set_option(&mut tag, t, "duplicate tag attributes")?;
} else if let Some(l) = Label::from_attr(attr) {
set_option(&mut label, l, "duplicate label attributes")?;
} else {
unknown_attrs.push(attr);
}
}
if !group {
return Ok(None);
}
if !unknown_attrs.is_empty() {
bail!(
"unknown attribute(s) for group field: #[prost({})]",
quote!(#(#unknown_attrs),*)
);
}
let tag = match tag.or(inferred_tag) {
Some(tag) => tag,
None => bail!("group field is missing a tag attribute"),
};
Ok(Some(Field {
label: label.unwrap_or(Label::Optional),
tag,
}))
}
pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
if let Some(mut field) = Field::new(attrs, None)? {
if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) {
bail!(
"invalid attribute for oneof field: {}",
attr.path().into_token_stream()
);
}
field.label = Label::Required;
Ok(Some(field))
} else {
Ok(None)
}
}
pub fn encode(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
if let Some(ref msg) = #ident {
const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
#prost_path::encoding::group::encode(TAG, msg, buf);
}
},
Label::Required => quote! {
{const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::group::encode(TAG, &#ident, buf);}
},
Label::Repeated => quote! {
for msg in &#ident {
const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
#prost_path::encoding::group::encode(TAG, msg, buf);
}
},
}
}
pub fn merge(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote! {
#prost_path::encoding::group::merge(
tag,
wire_type,
#ident.get_or_insert_with(::core::default::Default::default),
buf,
ctx,
)
},
Label::Required => quote! {
#prost_path::encoding::group::merge(tag, wire_type, #ident, buf, ctx)
},
Label::Repeated => quote! {
#prost_path::encoding::group::merge_repeated(tag, wire_type, #ident, buf, ctx)
},
}
}
pub fn encoded_len(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
#ident.as_ref().map_or(0, |msg| {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::group::encoded_len(#tag, msg)})
},
Label::Required => quote! {
{const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::group::encoded_len(TAG, &#ident)}
},
Label::Repeated => quote! {
{const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::group::encoded_len_repeated(TAG, &#ident)}
},
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote!(#ident = ::core::option::Option::None),
Label::Required => quote!(#ident.clear()),
Label::Repeated => quote!(#ident.clear()),
}
}
}

View File

@@ -0,0 +1,411 @@
use anyhow::{bail, Error};
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::punctuated::Punctuated;
use syn::{Expr, ExprLit, Ident, Lit, Meta, MetaNameValue, Path, Token};
use crate::field::{scalar, set_option, tag_attr};
/// The concrete Rust map type backing a protobuf `map<K, V>` field.
#[derive(Clone, Debug)]
pub enum MapTy {
    // `std::collections::HashMap` (attribute words `map` / `hash_map`).
    HashMap,
    // `alloc::collections::BTreeMap` (attribute word `btree_map`).
    BTreeMap,
    // `indexmap::IndexMap` (attribute word `index_map`).
    IndexMap,
}
impl MapTy {
    /// Maps a `#[prost(...)]` attribute word to the corresponding map flavor.
    fn from_str(s: &str) -> Option<MapTy> {
        let ty = match s {
            "map" | "hash_map" => MapTy::HashMap,
            "btree_map" => MapTy::BTreeMap,
            "index_map" => MapTy::IndexMap,
            _ => return None,
        };
        Some(ty)
    }
    /// The `prost::encoding` submodule name used for this map flavor.
    fn module(&self) -> Ident {
        let name = match self {
            MapTy::HashMap => "hash_map",
            MapTy::BTreeMap => "btree_map",
            MapTy::IndexMap => "index_map",
        };
        Ident::new(name, Span::call_site())
    }
    /// The path prefix of the crate/module providing the map type.
    fn lib(&self) -> TokenStream {
        match self {
            MapTy::HashMap => quote! { std::collections },
            MapTy::BTreeMap => quote! { prost::alloc::collections },
            MapTy::IndexMap => quote! { prost::indexmap },
        }
    }
}
/// Builds a throwaway scalar field descriptor for `ty`, used only to reuse the
/// scalar Debug-wrapper generation for map keys/values.
fn fake_scalar(ty: scalar::Ty) -> scalar::Field {
    scalar::Field {
        kind: scalar::Kind::Plain(scalar::DefaultValue::new(&ty)),
        ty,
        tag: 0, // never consulted by the debug-wrapper codegen
    }
}
/// A protobuf `map<K, V>` field parsed from `#[prost(...)]` attributes.
#[derive(Clone)]
pub struct Field {
    /// Which Rust map container backs the field.
    pub map_ty: MapTy,
    /// The scalar key type (restricted by `key_ty_from_str`).
    pub key_ty: scalar::Ty,
    /// The value type: a scalar or a nested message.
    pub value_ty: ValueTy,
    /// Protobuf field number.
    pub tag: u32,
}
impl Field {
    /// Parses a map field from its `#[prost(...)]` attributes.
    ///
    /// Accepts the map-type attribute in either the string form
    /// `map = "key, value"` or the list form `map(key, value)` (likewise for
    /// `hash_map` / `btree_map` / `index_map`), plus an optional `tag`
    /// attribute. Returns `Ok(None)` when the attributes do not describe a map.
    pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
        let mut types = None;
        let mut tag = None;
        for attr in attrs {
            if let Some(t) = tag_attr(attr)? {
                set_option(&mut tag, t, "duplicate tag attributes")?;
            } else if let Some(map_ty) = attr
                .path()
                .get_ident()
                .and_then(|i| MapTy::from_str(&i.to_string()))
            {
                let (k, v): (String, String) = match attr {
                    // String form: `map = "key, value"`.
                    Meta::NameValue(MetaNameValue {
                        value:
                            Expr::Lit(ExprLit {
                                lit: Lit::Str(lit), ..
                            }),
                        ..
                    }) => {
                        let items = lit.value();
                        let mut items = items.split(',').map(ToString::to_string);
                        let k = items.next().unwrap();
                        let v = match items.next() {
                            Some(k) => k,
                            None => bail!("invalid map attribute: must have key and value types"),
                        };
                        // More than two comma-separated items is malformed.
                        if items.next().is_some() {
                            bail!("invalid map attribute: {:?}", attr);
                        }
                        (k, v)
                    }
                    // List form: `map(key, value)`.
                    Meta::List(meta_list) => {
                        let nested = meta_list
                            .parse_args_with(Punctuated::<Ident, Token![,]>::parse_terminated)?
                            .into_iter()
                            .collect::<Vec<_>>();
                        if nested.len() != 2 {
                            bail!("invalid map attribute: must contain key and value types");
                        }
                        (nested[0].to_string(), nested[1].to_string())
                    }
                    // A map-named attribute in any other shape: not a map field.
                    _ => return Ok(None),
                };
                set_option(
                    &mut types,
                    (map_ty, key_ty_from_str(&k)?, ValueTy::from_str(&v)?),
                    "duplicate map type attribute",
                )?;
            } else {
                // Any unrecognized attribute disqualifies the map interpretation.
                return Ok(None);
            }
        }
        // Both a map type and a (possibly inferred) tag are required.
        Ok(match (types, tag.or(inferred_tag)) {
            (Some((map_ty, key_ty, value_ty)), Some(tag)) => Some(Field {
                map_ty,
                key_ty,
                value_ty,
                tag,
            }),
            _ => None,
        })
    }
    /// Parses a map field that is a oneof variant (no special handling needed).
    pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
        Field::new(attrs, None)
    }
    /// Returns a statement which encodes the map field.
    ///
    /// Enumeration values are encoded as `int32` with an explicit default so
    /// default entries can be elided.
    pub fn encode(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let tag = self.tag;
        let key_mod = self.key_ty.module();
        let ke = quote!(#prost_path::encoding::#key_mod::encode);
        let kl = quote!(#prost_path::encoding::#key_mod::encoded_len);
        let module = self.map_ty.module();
        match &self.value_ty {
            ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => {
                let default = quote!(#ty::default() as i32);
                quote! {
                    {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encode_with_default(
                        #ke,
                        #kl,
                        #prost_path::encoding::int32::encode,
                        #prost_path::encoding::int32::encoded_len,
                        &(#default),
                        TAG,
                        &#ident,
                        buf,
                    );}
                }
            }
            ValueTy::Scalar(value_ty) => {
                let val_mod = value_ty.module();
                let ve = quote!(#prost_path::encoding::#val_mod::encode);
                let vl = quote!(#prost_path::encoding::#val_mod::encoded_len);
                quote! {
                    {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encode(
                        #ke,
                        #kl,
                        #ve,
                        #vl,
                        TAG,
                        &#ident,
                        buf,
                    );}
                }
            }
            ValueTy::Message => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encode(
                    #ke,
                    #kl,
                    #prost_path::encoding::message::encode,
                    #prost_path::encoding::message::encoded_len,
                    TAG,
                    &#ident,
                    buf,
                );}
            },
        }
    }
    /// Returns an expression which evaluates to the result of merging a decoded key value pair
    /// into the map. `buf` and `ctx` are expected to be in scope in the
    /// generated `merge_field` body.
    pub fn merge(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let key_mod = self.key_ty.module();
        let km = quote!(#prost_path::encoding::#key_mod::merge);
        let module = self.map_ty.module();
        match &self.value_ty {
            ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => {
                let default = quote!(#ty::default() as i32);
                quote! {
                    #prost_path::encoding::#module::merge_with_default(
                        #km,
                        #prost_path::encoding::int32::merge,
                        #default,
                        &mut #ident,
                        buf,
                        ctx,
                    )
                }
            }
            ValueTy::Scalar(value_ty) => {
                let val_mod = value_ty.module();
                let vm = quote!(#prost_path::encoding::#val_mod::merge);
                quote!(#prost_path::encoding::#module::merge(#km, #vm, &mut #ident, buf, ctx))
            }
            ValueTy::Message => quote! {
                #prost_path::encoding::#module::merge(
                    #km,
                    #prost_path::encoding::message::merge,
                    &mut #ident,
                    buf,
                    ctx,
                )
            },
        }
    }
    /// Returns an expression which evaluates to the encoded length of the map.
    pub fn encoded_len(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let tag = self.tag;
        let key_mod = self.key_ty.module();
        let kl = quote!(#prost_path::encoding::#key_mod::encoded_len);
        let module = self.map_ty.module();
        match &self.value_ty {
            ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => {
                let default = quote!(#ty::default() as i32);
                quote! {
                    {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encoded_len_with_default(
                        #kl,
                        #prost_path::encoding::int32::encoded_len,
                        &(#default),
                        TAG,
                        &#ident,
                    )}
                }
            }
            ValueTy::Scalar(value_ty) => {
                let val_mod = value_ty.module();
                let vl = quote!(#prost_path::encoding::#val_mod::encoded_len);
                quote!({const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encoded_len(#kl, #vl, TAG, &#ident)})
            }
            ValueTy::Message => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::#module::encoded_len(
                    #kl,
                    #prost_path::encoding::message::encoded_len,
                    TAG,
                    &#ident,
                )}
            },
        }
    }
    /// Returns a statement which clears the map.
    pub fn clear(&self, ident: TokenStream) -> TokenStream {
        quote!(#ident.clear())
    }
    /// Returns methods to embed in the message.
    ///
    /// Only enumeration-valued maps get extra methods: typed `get_*` / `insert_*`
    /// accessors that convert between the stored `i32` and the enum type.
    pub fn methods(&self, prost_path: &Path, ident: &TokenStream) -> Option<TokenStream> {
        if let ValueTy::Scalar(scalar::Ty::Enumeration(ty)) = &self.value_ty {
            let key_ty = self.key_ty.rust_type(prost_path);
            let key_ref_ty = self.key_ty.rust_ref_type();
            let get = Ident::new(&format!("get_{ident}"), Span::call_site());
            let insert = Ident::new(&format!("insert_{ident}"), Span::call_site());
            // Numeric keys are looked up by reference; other key types pass
            // the reference type through unchanged.
            let take_ref = if self.key_ty.is_numeric() {
                quote!(&)
            } else {
                quote!()
            };
            let get_doc = format!(
                "Returns the enum value for the corresponding key in `{ident}`, \
                 or `None` if the entry does not exist or it is not a valid enum value."
            );
            let insert_doc = format!("Inserts a key value pair into `{ident}`.");
            Some(quote! {
                #[doc=#get_doc]
                pub fn #get(&self, key: #key_ref_ty) -> ::core::option::Option<#ty> {
                    self.#ident.get(#take_ref key).cloned().and_then(|x| {
                        let result: ::core::result::Result<#ty, _> = ::core::convert::TryFrom::try_from(x);
                        result.ok()
                    })
                }
                #[doc=#insert_doc]
                pub fn #insert(&mut self, key: #key_ty, value: #ty) -> ::core::option::Option<#ty> {
                    self.#ident.insert(key, value as i32).and_then(|x| {
                        let result: ::core::result::Result<#ty, _> = ::core::convert::TryFrom::try_from(x);
                        result.ok()
                    })
                }
            })
        } else {
            None
        }
    }
    /// Returns a newtype wrapper around the map, implementing nicer Debug
    ///
    /// The Debug tries to convert any enumerations met into the variants if possible, instead of
    /// outputting the raw numbers.
    pub fn debug(&self, prost_path: &Path, wrapper_name: TokenStream) -> TokenStream {
        let type_name = match self.map_ty {
            MapTy::HashMap => Ident::new("HashMap", Span::call_site()),
            MapTy::BTreeMap => Ident::new("BTreeMap", Span::call_site()),
            MapTy::IndexMap => Ident::new("IndexMap", Span::call_site()),
        };
        // A fake field for generating the debug wrapper
        let key_wrapper = fake_scalar(self.key_ty.clone()).debug(prost_path, quote!(KeyWrapper));
        let key = self.key_ty.rust_type(prost_path);
        let value_wrapper = self.value_ty.debug(prost_path);
        let libname = self.map_ty.lib();
        // Shared `fmt` body: wraps every key and value before handing them to
        // the debug_map builder.
        let fmt = quote! {
            fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                #key_wrapper
                #value_wrapper
                let mut builder = f.debug_map();
                for (k, v) in self.0 {
                    builder.entry(&KeyWrapper(k), &ValueWrapper(v));
                }
                builder.finish()
            }
        };
        match &self.value_ty {
            ValueTy::Scalar(ty) => {
                // Bytes values erase the concrete type and forward Debug directly.
                if let scalar::Ty::Bytes(_) = *ty {
                    return quote! {
                        struct #wrapper_name<'a>(&'a dyn ::core::fmt::Debug);
                        impl<'a> ::core::fmt::Debug for #wrapper_name<'a> {
                            fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                                self.0.fmt(f)
                            }
                        }
                    };
                }
                let value = ty.rust_type(prost_path);
                quote! {
                    struct #wrapper_name<'a>(&'a ::#libname::#type_name<#key, #value>);
                    impl<'a> ::core::fmt::Debug for #wrapper_name<'a> {
                        #fmt
                    }
                }
            }
            // Message values stay generic over V so any Debug message works.
            ValueTy::Message => quote! {
                struct #wrapper_name<'a, V: 'a>(&'a ::#libname::#type_name<#key, V>);
                impl<'a, V> ::core::fmt::Debug for #wrapper_name<'a, V>
                where
                    V: ::core::fmt::Debug + 'a,
                {
                    #fmt
                }
            },
        }
    }
}
/// Parses a scalar type name and validates it is a legal protobuf map key type
/// (integers, bool, or string — not float, double, bytes, or enums).
fn key_ty_from_str(s: &str) -> Result<scalar::Ty, Error> {
    let ty = scalar::Ty::from_str(s)?;
    let is_valid_key = matches!(
        ty,
        scalar::Ty::Int32
            | scalar::Ty::Int64
            | scalar::Ty::Uint32
            | scalar::Ty::Uint64
            | scalar::Ty::Sint32
            | scalar::Ty::Sint64
            | scalar::Ty::Fixed32
            | scalar::Ty::Fixed64
            | scalar::Ty::Sfixed32
            | scalar::Ty::Sfixed64
            | scalar::Ty::Bool
            | scalar::Ty::String(..)
    );
    if is_valid_key {
        Ok(ty)
    } else {
        bail!("invalid map key type: {}", s)
    }
}
/// A map value type.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ValueTy {
    /// Any scalar value type (including enumerations).
    Scalar(scalar::Ty),
    /// A nested message value.
    Message,
}
impl ValueTy {
    /// Parses a map value type name: any scalar name, or the word `message`.
    fn from_str(s: &str) -> Result<ValueTy, Error> {
        match scalar::Ty::from_str(s) {
            Ok(ty) => Ok(ValueTy::Scalar(ty)),
            Err(_) if s.trim() == "message" => Ok(ValueTy::Message),
            Err(_) => bail!("invalid map value type: {}", s),
        }
    }
    /// Returns a newtype wrapper around the ValueTy for nicer debug.
    ///
    /// If the contained value is enumeration, it tries to convert it to the variant. If not, it
    /// just forwards the implementation.
    fn debug(&self, prost_path: &Path) -> TokenStream {
        if let ValueTy::Scalar(ty) = self {
            fake_scalar(ty.clone()).debug(prost_path, quote!(ValueWrapper))
        } else {
            // Messages need no conversion: an identity "wrapper" suffices.
            quote!(
                fn ValueWrapper<T>(v: T) -> T {
                    v
                }
            )
        }
    }
}

View File

@@ -0,0 +1,134 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{Meta, Path};
use crate::field::{set_bool, set_option, tag_attr, word_attr, Label};
/// An embedded-message field parsed from `#[prost(message, ...)]` attributes.
#[derive(Clone)]
pub struct Field {
    /// Field cardinality (optional / required / repeated).
    pub label: Label,
    /// Protobuf field number. Presumably always non-zero (proto field numbers
    /// start at 1) — the generated code relies on this; TODO confirm upstream
    /// validation.
    pub tag: u32,
}
impl Field {
    /// Parses a message field from its `#[prost(...)]` attributes.
    ///
    /// Returns `Ok(None)` unless a `message` word attribute is present; errors
    /// on duplicate or unknown attributes, or when no tag is given or inferable.
    pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
        let mut message = false;
        let mut label = None;
        let mut tag = None;
        let mut boxed = false;
        let mut unknown_attrs = Vec::new();
        for attr in attrs {
            if word_attr("message", attr) {
                set_bool(&mut message, "duplicate message attribute")?;
            } else if word_attr("boxed", attr) {
                // `boxed` is accepted (and checked for duplicates) but does not
                // change the code generated by this module.
                set_bool(&mut boxed, "duplicate boxed attribute")?;
            } else if let Some(t) = tag_attr(attr)? {
                set_option(&mut tag, t, "duplicate tag attributes")?;
            } else if let Some(l) = Label::from_attr(attr) {
                set_option(&mut label, l, "duplicate label attributes")?;
            } else {
                unknown_attrs.push(attr);
            }
        }
        if !message {
            return Ok(None);
        }
        if !unknown_attrs.is_empty() {
            bail!(
                "unknown attribute(s) for message field: #[prost({})]",
                quote!(#(#unknown_attrs),*)
            );
        }
        let tag = match tag.or(inferred_tag) {
            Some(tag) => tag,
            None => bail!("message field is missing a tag attribute"),
        };
        Ok(Some(Field {
            label: label.unwrap_or(Label::Optional),
            tag,
        }))
    }
    /// Parses a message field that is a oneof variant: explicit labels are
    /// rejected and the field is forced to `Required`.
    pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
        if let Some(mut field) = Field::new(attrs, None)? {
            if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) {
                bail!(
                    "invalid attribute for oneof field: {}",
                    attr.path().into_token_stream()
                );
            }
            field.label = Label::Required;
            Ok(Some(field))
        } else {
            Ok(None)
        }
    }
    /// Returns a statement which encodes the message field.
    ///
    /// The generated code materializes the tag as a `NonZeroU32` const `TAG`.
    /// The generated `new_unchecked` assumes the tag is non-zero (protobuf
    /// field numbers start at 1) — TODO confirm tag validation upstream.
    pub fn encode(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let tag = self.tag;
        match self.label {
            Label::Optional => quote! {
                if let Some(ref msg) = #ident {
                    const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
                    #prost_path::encoding::message::encode(TAG, msg, buf);
                }
            },
            Label::Required => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::message::encode(TAG, &#ident, buf);}
            },
            Label::Repeated => quote! {
                for msg in &#ident {
                    const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
                    #prost_path::encoding::message::encode(TAG, msg, buf);
                }
            },
        }
    }
    /// Returns an expression which merges a decoded message into the field.
    /// `wire_type`, `buf` and `ctx` are expected to be in scope in the
    /// generated `merge_field` body.
    pub fn merge(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        match self.label {
            Label::Optional => quote! {
                #prost_path::encoding::message::merge(wire_type,
                    #ident.get_or_insert_with(::core::default::Default::default),
                    buf,
                    ctx)
            },
            Label::Required => quote! {
                #prost_path::encoding::message::merge(wire_type, #ident, buf, ctx)
            },
            Label::Repeated => quote! {
                #prost_path::encoding::message::merge_repeated(wire_type, #ident, buf, ctx)
            },
        }
    }
    /// Returns an expression which evaluates to the encoded length of the field.
    pub fn encoded_len(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let tag = self.tag;
        match self.label {
            Label::Optional => quote! {
                #ident.as_ref().map_or(0, |msg| {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::message::encoded_len(TAG, msg)})
            },
            Label::Required => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::message::encoded_len(TAG, &#ident)}
            },
            Label::Repeated => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#prost_path::encoding::message::encoded_len_repeated(TAG, &#ident)}
            },
        }
    }
    /// Returns a statement which resets the field to its default value.
    pub fn clear(&self, ident: TokenStream) -> TokenStream {
        match self.label {
            Label::Optional => quote!(#ident = ::core::option::Option::None),
            Label::Required => quote!(#ident.clear()),
            Label::Repeated => quote!(#ident.clear()),
        }
    }
}

View File

@@ -0,0 +1,356 @@
mod group;
mod map;
mod message;
mod oneof;
mod scalar;
use std::fmt;
use std::slice;
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::quote;
use syn::punctuated::Punctuated;
use syn::Path;
use syn::{Attribute, Expr, ExprLit, Lit, LitBool, LitInt, Meta, MetaNameValue, Token};
/// A parsed protobuf field, dispatching to the category-specific
/// implementation (scalar, message, map, oneof, or group).
#[derive(Clone)]
pub enum Field {
    /// A scalar field.
    Scalar(scalar::Field),
    /// A message field.
    Message(message::Field),
    /// A map field.
    Map(map::Field),
    /// A oneof field.
    Oneof(oneof::Field),
    /// A group field.
    Group(group::Field),
}
impl Field {
    /// Builds a `Field` from the attributes attached to a struct field.
    ///
    /// Returns an error for invalid meta items and `None` when the field
    /// should be skipped entirely.
    pub fn new(attrs: Vec<Attribute>, inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
        let attrs = prost_attrs(attrs)?;
        // TODO: check for ignore attribute.
        // Try each category parser in turn; the first parser that recognizes
        // the attribute set claims the field.
        let parsed = if let Some(f) = scalar::Field::new(&attrs, inferred_tag)? {
            Field::Scalar(f)
        } else if let Some(f) = message::Field::new(&attrs, inferred_tag)? {
            Field::Message(f)
        } else if let Some(f) = map::Field::new(&attrs, inferred_tag)? {
            Field::Map(f)
        } else if let Some(f) = oneof::Field::new(&attrs)? {
            Field::Oneof(f)
        } else if let Some(f) = group::Field::new(&attrs, inferred_tag)? {
            Field::Group(f)
        } else {
            bail!("no type attribute");
        };
        Ok(Some(parsed))
    }
    /// Builds a oneof-variant `Field` from the attributes on an enum variant.
    ///
    /// Returns an error for invalid meta items and `None` when the variant
    /// should be skipped.
    pub fn new_oneof(attrs: Vec<Attribute>) -> Result<Option<Field>, Error> {
        let attrs = prost_attrs(attrs)?;
        // TODO: check for ignore attribute.
        let parsed = if let Some(f) = scalar::Field::new_oneof(&attrs)? {
            Field::Scalar(f)
        } else if let Some(f) = message::Field::new_oneof(&attrs)? {
            Field::Message(f)
        } else if let Some(f) = map::Field::new_oneof(&attrs)? {
            Field::Map(f)
        } else if let Some(f) = group::Field::new_oneof(&attrs)? {
            Field::Group(f)
        } else {
            bail!("no type attribute for oneof field");
        };
        Ok(Some(parsed))
    }
    /// Returns every tag occupied by this field (a oneof spans several).
    pub fn tags(&self) -> Vec<u32> {
        match self {
            Field::Scalar(scalar) => vec![scalar.tag],
            Field::Message(message) => vec![message.tag],
            Field::Map(map) => vec![map.tag],
            Field::Oneof(oneof) => oneof.tags.clone(),
            Field::Group(group) => vec![group.tag],
        }
    }
    /// Returns a statement which encodes the field.
    pub fn encode(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        match self {
            Field::Scalar(scalar) => scalar.encode(prost_path, ident),
            Field::Message(message) => message.encode(prost_path, ident),
            Field::Map(map) => map.encode(prost_path, ident),
            Field::Oneof(oneof) => oneof.encode(ident),
            Field::Group(group) => group.encode(prost_path, ident),
        }
    }
    /// Returns an expression which evaluates to the result of merging a decoded
    /// value into the field.
    pub fn merge(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        match self {
            Field::Scalar(scalar) => scalar.merge(prost_path, ident),
            Field::Message(message) => message.merge(prost_path, ident),
            Field::Map(map) => map.merge(prost_path, ident),
            Field::Oneof(oneof) => oneof.merge(ident),
            Field::Group(group) => group.merge(prost_path, ident),
        }
    }
    /// Returns an expression which evaluates to the encoded length of the field.
    pub fn encoded_len(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        match self {
            Field::Scalar(scalar) => scalar.encoded_len(prost_path, ident),
            Field::Map(map) => map.encoded_len(prost_path, ident),
            Field::Message(msg) => msg.encoded_len(prost_path, ident),
            Field::Oneof(oneof) => oneof.encoded_len(ident),
            Field::Group(group) => group.encoded_len(prost_path, ident),
        }
    }
    /// Returns a statement which clears the field.
    pub fn clear(&self, ident: TokenStream) -> TokenStream {
        match self {
            Field::Scalar(scalar) => scalar.clear(ident),
            Field::Message(message) => message.clear(ident),
            Field::Map(map) => map.clear(ident),
            Field::Oneof(oneof) => oneof.clear(ident),
            Field::Group(group) => group.clear(ident),
        }
    }
    /// Returns an expression producing the field's default value.
    pub fn default(&self, prost_path: &Path) -> TokenStream {
        match self {
            // Scalars may carry an explicit default; everything else uses
            // the type's `Default` impl.
            Field::Scalar(scalar) => scalar.default(prost_path),
            _ => quote!(::core::default::Default::default()),
        }
    }
    /// Produces the fragment implementing debug for the given field.
    pub fn debug(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        match self {
            Field::Scalar(scalar) => {
                let wrapper = scalar.debug(prost_path, quote!(ScalarWrapper));
                quote! {
                    {
                        #wrapper
                        ScalarWrapper(&#ident)
                    }
                }
            }
            Field::Map(map) => {
                let wrapper = map.debug(prost_path, quote!(MapWrapper));
                quote! {
                    {
                        #wrapper
                        MapWrapper(&#ident)
                    }
                }
            }
            _ => quote!(&#ident),
        }
    }
    /// Returns any extra accessor methods to embed in the message impl.
    pub fn methods(&self, prost_path: &Path, ident: &TokenStream) -> Option<TokenStream> {
        match self {
            Field::Scalar(scalar) => scalar.methods(ident),
            Field::Map(map) => map.methods(prost_path, ident),
            _ => None,
        }
    }
}
/// The protobuf cardinality label attached to a field.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum Label {
    /// An optional field.
    Optional,
    /// A required field.
    Required,
    /// A repeated field.
    Repeated,
}
impl Label {
    /// Protobuf keyword spelling of this label.
    fn as_str(self) -> &'static str {
        match self {
            Label::Optional => "optional",
            Label::Required => "required",
            Label::Repeated => "repeated",
        }
    }
    /// Iterates over every label variant.
    fn variants() -> slice::Iter<'static, Label> {
        const VARIANTS: &[Label] = &[Label::Optional, Label::Required, Label::Repeated];
        VARIANTS.iter()
    }
    /// Parses a string into a field label.
    /// If the string doesn't match a field label, `None` is returned.
    fn from_attr(attr: &Meta) -> Option<Label> {
        match attr {
            // Only bare path attributes (`optional`, `required`, `repeated`)
            // can denote a label.
            Meta::Path(path) => Label::variants()
                .copied()
                .find(|label| path.is_ident(label.as_str())),
            _ => None,
        }
    }
}
impl fmt::Debug for Label {
    /// Debug output is the protobuf keyword itself.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
impl fmt::Display for Label {
    /// Display output matches Debug: the protobuf keyword.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// Collects the meta items from every `#[prost(...)]` list attribute,
/// e.g. `#[prost(foo, bar="baz")]`; non-`prost` attributes are ignored.
fn prost_attrs(attrs: Vec<Attribute>) -> Result<Vec<Meta>, Error> {
    let mut metas = Vec::new();
    for attr in &attrs {
        // Only list-style attributes can be `#[prost(...)]`.
        let Meta::List(list) = &attr.meta else { continue };
        if !list.path.is_ident("prost") {
            continue;
        }
        let items = list.parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)?;
        metas.extend(items);
    }
    Ok(metas)
}
/// Stores `value` into `option`, failing with `message` when a value is
/// already present (the error reports both the old and the new value).
pub fn set_option<T>(option: &mut Option<T>, value: T, message: &str) -> Result<(), Error>
where
    T: fmt::Debug,
{
    match option {
        Some(existing) => bail!("{}: {:?} and {:?}", message, existing, value),
        None => {
            *option = Some(value);
            Ok(())
        }
    }
}
/// Sets `b` to true, failing with `message` if it was already set.
pub fn set_bool(b: &mut bool, message: &str) -> Result<(), Error> {
    if *b {
        bail!("{}", message);
    }
    *b = true;
    Ok(())
}
/// Unpacks an attribute into a (key, boolean) pair, returning the boolean value.
/// If the key doesn't match the attribute, `None` is returned.
fn bool_attr(key: &str, attr: &Meta) -> Result<Option<bool>, Error> {
    if !attr.path().is_ident(key) {
        return Ok(None);
    }
    match attr {
        // A bare `#[prost(key)]` word means `true`.
        Meta::Path(..) => Ok(Some(true)),
        // List form: `key(true)` / `key(false)`.
        Meta::List(list) => Ok(Some(list.parse_args::<LitBool>()?.value())),
        // String form: `key = "true"`.
        Meta::NameValue(MetaNameValue {
            value:
                Expr::Lit(ExprLit {
                    lit: Lit::Str(s), ..
                }),
            ..
        }) => Ok(Some(s.value().parse::<bool>()?)),
        // Literal form: `key = true`.
        Meta::NameValue(MetaNameValue {
            value:
                Expr::Lit(ExprLit {
                    lit: Lit::Bool(LitBool { value, .. }),
                    ..
                }),
            ..
        }) => Ok(Some(*value)),
        _ => bail!("invalid {} attribute", key),
    }
}
/// Checks if an attribute matches a word.
fn word_attr(key: &str, attr: &Meta) -> bool {
    // Only a bare path attribute can be a word.
    matches!(attr, Meta::Path(path) if path.is_ident(key))
}
/// Unpacks a `tag` attribute into its numeric value.
///
/// Accepts `tag(N)`, `tag = N`, and `tag = "N"`. Returns `Ok(None)` when the
/// attribute is not `tag`, and an error when it is malformed.
pub(super) fn tag_attr(attr: &Meta) -> Result<Option<u32>, Error> {
    if !attr.path().is_ident("tag") {
        return Ok(None);
    }
    match attr {
        Meta::List(list) => Ok(Some(list.parse_args::<LitInt>()?.base10_parse()?)),
        Meta::NameValue(MetaNameValue {
            value: Expr::Lit(expr),
            ..
        }) => match &expr.lit {
            Lit::Str(s) => Ok(Some(s.value().parse::<u32>()?)),
            Lit::Int(int) => Ok(Some(int.base10_parse()?)),
            _ => bail!("invalid tag attribute: {:?}", attr),
        },
        _ => bail!("invalid tag attribute: {:?}", attr),
    }
}
/// Unpacks a `tags` attribute into a list of tag numbers.
///
/// Accepts the list form `tags(1, 2, 3)` and the string form
/// `tags = "1, 2, 3"`. Returns `Ok(None)` when the attribute is not `tags`,
/// and an error when it is malformed.
fn tags_attr(attr: &Meta) -> Result<Option<Vec<u32>>, Error> {
    if !attr.path().is_ident("tags") {
        return Ok(None);
    }
    match *attr {
        Meta::List(ref meta_list) => Ok(Some(
            meta_list
                .parse_args_with(Punctuated::<LitInt, Token![,]>::parse_terminated)?
                .iter()
                .map(LitInt::base10_parse)
                .collect::<Result<Vec<_>, _>>()?,
        )),
        Meta::NameValue(MetaNameValue {
            value:
                Expr::Lit(ExprLit {
                    lit: Lit::Str(ref lit),
                    ..
                }),
            ..
        }) => lit
            .value()
            .split(',')
            .map(|s| s.trim().parse::<u32>().map_err(Error::from))
            .collect::<Result<Vec<u32>, _>>()
            .map(Some),
        // Fix: previously reported "invalid tag attribute", which is the
        // error text of the singular `tag` parser and misleads the user.
        _ => bail!("invalid tags attribute: {:?}", attr),
    }
}

View File

@@ -0,0 +1,90 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::quote;
use syn::{parse_str, Expr, ExprLit, Ident, Lit, Meta, MetaNameValue, Path};
use crate::field::{set_option, tags_attr};
/// A oneof field: a Rust enum holding one of several protobuf fields.
#[derive(Clone)]
pub struct Field {
    /// Path to the Rust enum type implementing the oneof.
    pub ty: Path,
    /// The tags of every field contained in the oneof.
    pub tags: Vec<u32>,
}
impl Field {
    /// Parses a oneof field from its `#[prost(...)]` meta items.
    ///
    /// Returns `Ok(None)` when no `oneof` attribute is present, and an error
    /// when the attribute set is malformed or the `tags` list is missing.
    pub fn new(attrs: &[Meta]) -> Result<Option<Field>, Error> {
        let mut ty = None;
        let mut tags = None;
        let mut unknown_attrs = Vec::new();
        for attr in attrs {
            if attr.path().is_ident("oneof") {
                // The oneof type may be given as `oneof = "path::To::Enum"`
                // or as `oneof(Ident)`.
                let t = match *attr {
                    Meta::NameValue(MetaNameValue {
                        value:
                            Expr::Lit(ExprLit {
                                lit: Lit::Str(ref lit),
                                ..
                            }),
                        ..
                    }) => parse_str::<Path>(&lit.value())?,
                    Meta::List(ref list) => list.parse_args::<Ident>()?.into(),
                    _ => bail!("invalid oneof attribute: {:?}", attr),
                };
                set_option(&mut ty, t, "duplicate oneof attribute")?;
            } else if let Some(t) = tags_attr(attr)? {
                set_option(&mut tags, t, "duplicate tags attributes")?;
            } else {
                unknown_attrs.push(attr);
            }
        }
        // No `oneof` attribute: this is not a oneof field.
        let ty = match ty {
            Some(ty) => ty,
            None => return Ok(None),
        };
        if !unknown_attrs.is_empty() {
            // Fix: this error previously said "message field", copied from
            // the message-field parser; it fires for oneof fields.
            bail!(
                "unknown attribute(s) for oneof field: #[prost({})]",
                quote!(#(#unknown_attrs),*)
            );
        }
        let tags = match tags {
            Some(tags) => tags,
            None => bail!("oneof field is missing a tags attribute"),
        };
        Ok(Some(Field { ty, tags }))
    }
    /// Returns a statement which encodes the oneof field.
    pub fn encode(&self, ident: TokenStream) -> TokenStream {
        quote! {
            if let Some(ref oneof) = #ident {
                oneof.encode(buf)
            }
        }
    }
    /// Returns an expression which evaluates to the result of decoding the oneof field.
    pub fn merge(&self, ident: TokenStream) -> TokenStream {
        let ty = &self.ty;
        quote! {
            #ty::merge(#ident, number, wire_type, buf, ctx)
        }
    }
    /// Returns an expression which evaluates to the encoded length of the oneof field.
    pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
        let ty = &self.ty;
        quote! {
            #ident.as_ref().map_or(0, #ty::encoded_len)
        }
    }
    /// Returns a statement which resets the oneof field to `None`.
    pub fn clear(&self, ident: TokenStream) -> TokenStream {
        quote!(#ident = ::core::option::Option::None)
    }
}

View File

@@ -0,0 +1,842 @@
use std::fmt;
use anyhow::{Error, anyhow, bail};
use proc_macro2::{Span, TokenStream};
use quote::{ToTokens, TokenStreamExt, quote};
use syn::{Expr, ExprLit, Ident, Index, Lit, LitByteStr, Meta, MetaNameValue, Path, parse_str};
use crate::field::{Label, bool_attr, set_option, tag_attr};
/// A scalar protobuf field.
#[derive(Clone)]
pub struct Field {
    /// The protobuf scalar type of the field.
    pub ty: Ty,
    /// Cardinality, plus the default value where applicable.
    pub kind: Kind,
    /// Protobuf field number used on the wire.
    pub tag: u32,
}
impl Field {
    /// Parses a scalar field from its `#[prost(...)]` meta items.
    ///
    /// Returns `Ok(None)` when no type attribute is present (the field is not
    /// a scalar). The tag comes from the `tag` attribute or `inferred_tag`;
    /// `label`, `packed`, and `default` attributes refine the `Kind`.
    pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
        let mut ty = None;
        let mut label = None;
        let mut packed = None;
        let mut default = None;
        let mut tag = None;
        let mut unknown_attrs = Vec::new();
        for attr in attrs {
            if let Some(t) = Ty::from_attr(attr)? {
                set_option(&mut ty, t, "duplicate type attributes")?;
            } else if let Some(p) = bool_attr("packed", attr)? {
                set_option(&mut packed, p, "duplicate packed attributes")?;
            } else if let Some(t) = tag_attr(attr)? {
                set_option(&mut tag, t, "duplicate tag attributes")?;
            } else if let Some(l) = Label::from_attr(attr) {
                set_option(&mut label, l, "duplicate label attributes")?;
            } else if let Some(d) = DefaultValue::from_attr(attr)? {
                set_option(&mut default, d, "duplicate default attributes")?;
            } else {
                unknown_attrs.push(attr);
            }
        }
        // Without a type attribute this is not a scalar field.
        let ty = match ty {
            Some(ty) => ty,
            None => return Ok(None),
        };
        if !unknown_attrs.is_empty() {
            bail!("unknown attribute(s): #[prost({})]", quote!(#(#unknown_attrs),*));
        }
        let tag = match tag.or(inferred_tag) {
            Some(tag) => tag,
            None => bail!("missing tag attribute"),
        };
        // Resolve the default: either parse the explicit literal or use the
        // type's zero value.
        let has_default = default.is_some();
        let default = default
            .map_or_else(|| Ok(DefaultValue::new(&ty)), |lit| DefaultValue::from_lit(&ty, lit))?;
        // Combine label / packed / default into a Kind, rejecting the
        // incoherent combinations.
        let kind = match (label, packed, has_default) {
            (None, Some(true), _)
            | (Some(Label::Optional), Some(true), _)
            | (Some(Label::Required), Some(true), _) => {
                bail!("packed attribute may only be applied to repeated fields");
            }
            (Some(Label::Repeated), Some(true), _) if !ty.is_numeric() => {
                bail!("packed attribute may only be applied to numeric types");
            }
            (Some(Label::Repeated), _, true) => {
                bail!("repeated fields may not have a default value");
            }
            (None, _, _) => Kind::Plain(default),
            (Some(Label::Optional), _, _) => Kind::Optional(default),
            (Some(Label::Required), _, _) => Kind::Required(default),
            // Numeric repeated fields default to packed encoding (proto3).
            (Some(Label::Repeated), packed, false) if packed.unwrap_or_else(|| ty.is_numeric()) => {
                Kind::Packed
            }
            (Some(Label::Repeated), _, false) => Kind::Repeated,
        };
        Ok(Some(Field { ty, kind, tag }))
    }
    /// Parses a scalar field appearing inside a oneof variant.
    ///
    /// Oneof variants may not carry cardinality attributes; a plain field is
    /// promoted to required since a set variant always has a value.
    pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
        if let Some(mut field) = Field::new(attrs, None)? {
            match field.kind {
                Kind::Plain(default) => {
                    field.kind = Kind::Required(default);
                    Ok(Some(field))
                }
                Kind::Optional(..) => bail!("invalid optional attribute on oneof field"),
                Kind::Required(..) => bail!("invalid required attribute on oneof field"),
                Kind::Packed | Kind::Repeated => bail!("invalid repeated attribute on oneof field"),
            }
        } else {
            Ok(None)
        }
    }
    /// Returns a statement which encodes the scalar field into `buf`.
    ///
    /// NOTE(review): the generated `const TAG` uses
    /// `NonZeroU32::new_unchecked(#tag)`; a zero tag would be undefined
    /// behavior — presumably tags are validated non-zero upstream, confirm.
    pub fn encode(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let module = self.ty.module();
        let encode_fn = match self.kind {
            Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(encode),
            Kind::Repeated => quote!(encode_repeated),
            Kind::Packed => quote!(encode_packed),
        };
        let encode_fn = quote!(#prost_path::encoding::#module::#encode_fn);
        let tag = self.tag;
        match self.kind {
            // Plain (proto3) fields are skipped when equal to the default.
            Kind::Plain(ref default) => {
                let default = default.typed();
                quote! {
                    if #ident != #default {
                        const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
                        #encode_fn(TAG, &#ident, buf);
                    }
                }
            }
            Kind::Optional(..) => quote! {
                if let ::core::option::Option::Some(ref value) = #ident {
                    const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
                    #encode_fn(TAG, value, buf);
                }
            },
            Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#encode_fn(TAG, &#ident, buf);}
            },
        }
    }
    /// Returns an expression which evaluates to the result of merging a decoded
    /// scalar value into the field.
    pub fn merge(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let module = self.ty.module();
        let merge_fn = match self.kind {
            Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(merge),
            Kind::Repeated | Kind::Packed => quote!(merge_repeated),
        };
        let merge_fn = quote!(#prost_path::encoding::#module::#merge_fn);
        match self.kind {
            Kind::Plain(..) | Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
                #merge_fn(wire_type, #ident, buf, ctx)
            },
            // Optional fields materialize a default before merging.
            Kind::Optional(..) => quote! {
                #merge_fn(wire_type,
                    #ident.get_or_insert_with(::core::default::Default::default),
                    buf,
                    ctx)
            },
        }
    }
    /// Returns an expression which evaluates to the encoded length of the field.
    pub fn encoded_len(&self, prost_path: &Path, ident: TokenStream) -> TokenStream {
        let module = self.ty.module();
        let encoded_len_fn = match self.kind {
            Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(encoded_len),
            Kind::Repeated => quote!(encoded_len_repeated),
            Kind::Packed => quote!(encoded_len_packed),
        };
        let encoded_len_fn = quote!(#prost_path::encoding::#module::#encoded_len_fn);
        let tag = self.tag;
        match self.kind {
            // Plain fields contribute zero length when equal to the default.
            Kind::Plain(ref default) => {
                let default = default.typed();
                quote! {
                    if #ident != #default {
                        const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };
                        #encoded_len_fn(TAG, &#ident)
                    } else {
                        0
                    }
                }
            }
            Kind::Optional(..) => quote! {
                #ident.as_ref().map_or(0, |value| {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#encoded_len_fn(TAG, value)})
            },
            Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
                {const TAG: ::core::num::NonZeroU32 = unsafe { ::core::num::NonZeroU32::new_unchecked(#tag) };#encoded_len_fn(TAG, &#ident)}
            },
        }
    }
    /// Returns a statement which resets the field to its default state.
    pub fn clear(&self, ident: TokenStream) -> TokenStream {
        match self.kind {
            Kind::Plain(ref default) | Kind::Required(ref default) => {
                let default = default.typed();
                match self.ty {
                    // Strings and bytes clear in place to keep their allocation.
                    Ty::String(..) | Ty::Bytes(..) => quote!(#ident.clear()),
                    _ => quote!(#ident = #default),
                }
            }
            Kind::Optional(_) => quote!(#ident = ::core::option::Option::None),
            Kind::Repeated | Kind::Packed => quote!(#ident.clear()),
        }
    }
    /// Returns an expression which evaluates to the default value of the field.
    pub fn default(&self, prost_path: &Path) -> TokenStream {
        match self.kind {
            Kind::Plain(ref value) | Kind::Required(ref value) => value.owned(),
            Kind::Optional(_) => quote!(::core::option::Option::None),
            Kind::Repeated | Kind::Packed => quote!(#prost_path::alloc::vec::Vec::new()),
        }
    }
    /// An inner debug wrapper, around the base type.
    fn debug_inner(&self, wrap_name: TokenStream) -> TokenStream {
        if let Ty::Enumeration(ref ty) = self.ty {
            // Enum values print as the enum variant when valid, otherwise as
            // the raw i32.
            quote! {
                struct #wrap_name<'a>(&'a i32);
                impl<'a> ::core::fmt::Debug for #wrap_name<'a> {
                    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                        let res: ::core::result::Result<#ty, _> = ::core::convert::TryFrom::try_from(*self.0);
                        match res {
                            Err(_) => ::core::fmt::Debug::fmt(&self.0, f),
                            Ok(en) => ::core::fmt::Debug::fmt(&en, f),
                        }
                    }
                }
            }
        } else {
            // Non-enum scalars need no wrapping; emit an identity function.
            quote! {
                #[allow(non_snake_case)]
                fn #wrap_name<T>(v: T) -> T { v }
            }
        }
    }
    /// Returns a fragment for formatting the field `ident` in `Debug`.
    pub fn debug(&self, prost_path: &Path, wrapper_name: TokenStream) -> TokenStream {
        let wrapper = self.debug_inner(quote!(Inner));
        let inner_ty = self.ty.rust_type(prost_path);
        match self.kind {
            Kind::Plain(_) | Kind::Required(_) => self.debug_inner(wrapper_name),
            Kind::Optional(_) => quote! {
                struct #wrapper_name<'a>(&'a ::core::option::Option<#inner_ty>);
                impl<'a> ::core::fmt::Debug for #wrapper_name<'a> {
                    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                        #wrapper
                        ::core::fmt::Debug::fmt(&self.0.as_ref().map(Inner), f)
                    }
                }
            },
            Kind::Repeated | Kind::Packed => {
                quote! {
                    struct #wrapper_name<'a>(&'a #prost_path::alloc::vec::Vec<#inner_ty>);
                    impl<'a> ::core::fmt::Debug for #wrapper_name<'a> {
                        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                            let mut vec_builder = f.debug_list();
                            for v in self.0 {
                                #wrapper
                                vec_builder.entry(&Inner(v));
                            }
                            vec_builder.finish()
                        }
                    }
                }
            }
        }
    }
    /// Returns methods to embed in the message.
    ///
    /// Enumeration fields get typed getters/setters (and `push_*` for
    /// repeated fields); other optional fields get a defaulting getter.
    pub fn methods(&self, ident: &TokenStream) -> Option<TokenStream> {
        let mut ident_str = ident.to_string();
        if ident_str.starts_with("r#") {
            ident_str = ident_str.split_off(2);
        }
        // Prepend `get_` for getter methods of tuple structs.
        let get = match syn::parse_str::<Index>(&ident_str) {
            Ok(index) => {
                let get = Ident::new(&format!("get_{}", index.index), Span::call_site());
                quote!(#get)
            }
            Err(_) => quote!(#ident),
        };
        if let Ty::Enumeration(ref ty) = self.ty {
            let set = Ident::new(&format!("set_{ident_str}"), Span::call_site());
            let set_doc = format!("Sets `{ident_str}` to the provided enum value.");
            Some(match self.kind {
                Kind::Plain(ref default) | Kind::Required(ref default) => {
                    let get_doc = format!(
                        "Returns the enum value of `{ident_str}`, \
                        or the default if the field is set to an invalid enum value."
                    );
                    quote! {
                        #[doc=#get_doc]
                        pub fn #get(&self) -> #ty {
                            ::core::convert::TryFrom::try_from(self.#ident).unwrap_or(#default)
                        }
                        #[doc=#set_doc]
                        pub fn #set(&mut self, value: #ty) {
                            self.#ident = value as i32;
                        }
                    }
                }
                Kind::Optional(ref default) => {
                    let get_doc = format!(
                        "Returns the enum value of `{ident_str}`, \
                        or the default if the field is unset or set to an invalid enum value."
                    );
                    quote! {
                        #[doc=#get_doc]
                        pub fn #get(&self) -> #ty {
                            self.#ident.and_then(|x| {
                                let result: ::core::result::Result<#ty, _> = ::core::convert::TryFrom::try_from(x);
                                result.ok()
                            }).unwrap_or(#default)
                        }
                        #[doc=#set_doc]
                        pub fn #set(&mut self, value: #ty) {
                            self.#ident = ::core::option::Option::Some(value as i32);
                        }
                    }
                }
                Kind::Repeated | Kind::Packed => {
                    let iter_doc = format!(
                        "Returns an iterator which yields the valid enum values contained in `{ident_str}`."
                    );
                    let push = Ident::new(&format!("push_{ident_str}"), Span::call_site());
                    let push_doc = format!("Appends the provided enum value to `{ident_str}`.");
                    quote! {
                        #[doc=#iter_doc]
                        pub fn #get(&self) -> ::core::iter::FilterMap<
                            ::core::iter::Cloned<::core::slice::Iter<i32>>,
                            fn(i32) -> ::core::option::Option<#ty>,
                        > {
                            self.#ident.iter().cloned().filter_map(|x| {
                                let result: ::core::result::Result<#ty, _> = ::core::convert::TryFrom::try_from(x);
                                result.ok()
                            })
                        }
                        #[doc=#push_doc]
                        pub fn #push(&mut self, value: #ty) {
                            self.#ident.push(value as i32);
                        }
                    }
                }
            })
        } else if let Kind::Optional(ref default) = self.kind {
            let ty = self.ty.rust_ref_type();
            // Numeric values are returned by value; strings/bytes as slices.
            let match_some = if self.ty.is_numeric() {
                quote!(::core::option::Option::Some(val) => val,)
            } else {
                quote!(::core::option::Option::Some(ref val) => &val[..],)
            };
            let get_doc = format!(
                "Returns the value of `{ident_str}`, or the default value if `{ident_str}` is unset."
            );
            Some(quote! {
                #[doc=#get_doc]
                pub fn #get(&self) -> #ty {
                    match self.#ident {
                        #match_some
                        ::core::option::Option::None => #default,
                    }
                }
            })
        } else {
            None
        }
    }
}
/// A scalar protobuf field type.
#[derive(Clone, PartialEq, Eq)]
pub enum Ty {
    Double,
    Float,
    Int32,
    Int64,
    Uint32,
    Uint64,
    Sint32,
    Sint64,
    Fixed32,
    Fixed64,
    Sfixed32,
    Sfixed64,
    Bool,
    /// A `string` field, with its Rust backing representation.
    String(StringTy),
    /// A `bytes` field, with its Rust backing representation.
    Bytes(BytesTy),
    /// An `enum` field; the path names the generated Rust enum type.
    Enumeration(Path),
}
/// The Rust representation backing a `string` field.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StringTy {
    /// Backed by `alloc::string::String`.
    Vec,
    /// Backed by the crate's `ByteStr` type.
    Bytes,
}
impl StringTy {
    /// Parses the value of a `string = "..."` attribute.
    fn try_from_str(s: &str) -> Result<Self, Error> {
        if s == "vec" {
            Ok(StringTy::Vec)
        } else if s == "bytes" {
            Ok(StringTy::Bytes)
        } else {
            bail!("Invalid string type: {s}")
        }
    }
    /// Returns the Rust type used to store this string representation.
    fn rust_type(&self, prost_path: &Path) -> TokenStream {
        if matches!(self, StringTy::Vec) {
            quote! { #prost_path::alloc::string::String }
        } else {
            quote! { #prost_path::ByteStr }
        }
    }
}
/// The Rust representation backing a `bytes` field.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BytesTy {
    /// Backed by `alloc::vec::Vec<u8>`.
    Vec,
    /// Backed by `bytes::Bytes`.
    Bytes,
}
impl BytesTy {
    /// Parses the value of a `bytes = "..."` attribute.
    fn try_from_str(s: &str) -> Result<Self, Error> {
        if s == "vec" {
            Ok(BytesTy::Vec)
        } else if s == "bytes" {
            Ok(BytesTy::Bytes)
        } else {
            bail!("Invalid bytes type: {}", s)
        }
    }
    /// Returns the Rust type used to store this bytes representation.
    fn rust_type(&self, prost_path: &Path) -> TokenStream {
        if matches!(self, BytesTy::Vec) {
            quote! { #prost_path::alloc::vec::Vec<u8> }
        } else {
            quote! { #prost_path::bytes::Bytes }
        }
    }
}
impl Ty {
    /// Parses a scalar type from a single `#[prost(...)]` meta item.
    ///
    /// Recognizes bare words (`int32`, `bool`, ...), the name-value forms
    /// `string = "..."` / `bytes = "..."` / `enumeration = "Path"`, and the
    /// list form `enumeration(Path)`. Returns `Ok(None)` when the attribute
    /// does not name a scalar type.
    pub fn from_attr(attr: &Meta) -> Result<Option<Ty>, Error> {
        let ty = match *attr {
            Meta::Path(ref name) if name.is_ident("float") => Ty::Float,
            Meta::Path(ref name) if name.is_ident("double") => Ty::Double,
            Meta::Path(ref name) if name.is_ident("int32") => Ty::Int32,
            Meta::Path(ref name) if name.is_ident("int64") => Ty::Int64,
            Meta::Path(ref name) if name.is_ident("uint32") => Ty::Uint32,
            Meta::Path(ref name) if name.is_ident("uint64") => Ty::Uint64,
            Meta::Path(ref name) if name.is_ident("sint32") => Ty::Sint32,
            Meta::Path(ref name) if name.is_ident("sint64") => Ty::Sint64,
            Meta::Path(ref name) if name.is_ident("fixed32") => Ty::Fixed32,
            Meta::Path(ref name) if name.is_ident("fixed64") => Ty::Fixed64,
            Meta::Path(ref name) if name.is_ident("sfixed32") => Ty::Sfixed32,
            Meta::Path(ref name) if name.is_ident("sfixed64") => Ty::Sfixed64,
            Meta::Path(ref name) if name.is_ident("bool") => Ty::Bool,
            // Bare `string`/`bytes` default to the Vec-backed representation.
            Meta::Path(ref name) if name.is_ident("string") => Ty::String(StringTy::Vec),
            Meta::Path(ref name) if name.is_ident("bytes") => Ty::Bytes(BytesTy::Vec),
            Meta::NameValue(MetaNameValue {
                ref path,
                value: Expr::Lit(ExprLit { lit: Lit::Str(ref l), .. }),
                ..
            }) if path.is_ident("string") => Ty::String(StringTy::try_from_str(&l.value())?),
            Meta::NameValue(MetaNameValue {
                ref path,
                value: Expr::Lit(ExprLit { lit: Lit::Str(ref l), .. }),
                ..
            }) if path.is_ident("bytes") => Ty::Bytes(BytesTy::try_from_str(&l.value())?),
            Meta::NameValue(MetaNameValue {
                ref path,
                value: Expr::Lit(ExprLit { lit: Lit::Str(ref l), .. }),
                ..
            }) if path.is_ident("enumeration") => Ty::Enumeration(parse_str::<Path>(&l.value())?),
            Meta::List(ref meta_list) if meta_list.path.is_ident("enumeration") => {
                Ty::Enumeration(meta_list.parse_args::<Path>()?)
            }
            _ => return Ok(None),
        };
        Ok(Some(ty))
    }
    /// Parses a scalar type from its textual form, e.g. `"uint32"` or
    /// `"enumeration(Path)"` / `"enumeration<Path>"`.
    pub fn from_str(s: &str) -> Result<Ty, Error> {
        let enumeration_len = "enumeration".len();
        let error = Err(anyhow!("invalid type: {}", s));
        let ty = match s.trim() {
            "float" => Ty::Float,
            "double" => Ty::Double,
            "int32" => Ty::Int32,
            "int64" => Ty::Int64,
            "uint32" => Ty::Uint32,
            "uint64" => Ty::Uint64,
            "sint32" => Ty::Sint32,
            "sint64" => Ty::Sint64,
            "fixed32" => Ty::Fixed32,
            "fixed64" => Ty::Fixed64,
            "sfixed32" => Ty::Sfixed32,
            "sfixed64" => Ty::Sfixed64,
            "bool" => Ty::Bool,
            "string" => Ty::String(StringTy::Vec),
            "bytes" => Ty::Bytes(BytesTy::Vec),
            // `enumeration(...)` or `enumeration<...>` with a path inside.
            s if s.len() > enumeration_len && &s[..enumeration_len] == "enumeration" => {
                let s = &s[enumeration_len..].trim();
                // The delimiters must be a matched-looking pair; the path
                // between them is parsed as a Rust path.
                match s.chars().next() {
                    Some('<') | Some('(') => (),
                    _ => return error,
                }
                match s.chars().next_back() {
                    Some('>') | Some(')') => (),
                    _ => return error,
                }
                Ty::Enumeration(parse_str::<Path>(s[1..s.len() - 1].trim())?)
            }
            _ => return error,
        };
        Ok(ty)
    }
    /// Returns the type as it appears in protobuf field declarations.
    pub fn as_str(&self) -> &'static str {
        match *self {
            Ty::Double => "double",
            Ty::Float => "float",
            Ty::Int32 => "int32",
            Ty::Int64 => "int64",
            Ty::Uint32 => "uint32",
            Ty::Uint64 => "uint64",
            Ty::Sint32 => "sint32",
            Ty::Sint64 => "sint64",
            Ty::Fixed32 => "fixed32",
            Ty::Fixed64 => "fixed64",
            Ty::Sfixed32 => "sfixed32",
            Ty::Sfixed64 => "sfixed64",
            Ty::Bool => "bool",
            Ty::String(..) => "string",
            Ty::Bytes(..) => "bytes",
            Ty::Enumeration(..) => "enum",
        }
    }
    // TODO: rename to 'owned_type'.
    /// Returns the owned Rust type used to store this scalar.
    pub fn rust_type(&self, prost_path: &Path) -> TokenStream {
        match self {
            Ty::String(ty) => ty.rust_type(prost_path),
            Ty::Bytes(ty) => ty.rust_type(prost_path),
            // Numeric/bool/enum owned types coincide with their ref types.
            _ => self.rust_ref_type(),
        }
    }
    // TODO: rename to 'ref_type'
    /// Returns the borrowed Rust type used in getter signatures.
    pub fn rust_ref_type(&self) -> TokenStream {
        match *self {
            Ty::Double => quote!(f64),
            Ty::Float => quote!(f32),
            Ty::Int32 => quote!(i32),
            Ty::Int64 => quote!(i64),
            Ty::Uint32 => quote!(u32),
            Ty::Uint64 => quote!(u64),
            Ty::Sint32 => quote!(i32),
            Ty::Sint64 => quote!(i64),
            Ty::Fixed32 => quote!(u32),
            Ty::Fixed64 => quote!(u64),
            Ty::Sfixed32 => quote!(i32),
            Ty::Sfixed64 => quote!(i64),
            Ty::Bool => quote!(bool),
            Ty::String(..) => quote!(&str),
            Ty::Bytes(..) => quote!(&[u8]),
            // Enums are represented as raw i32 on the wire and in structs.
            Ty::Enumeration(..) => quote!(i32),
        }
    }
    /// Returns the `prost::encoding` submodule implementing this type.
    pub fn module(&self) -> Ident {
        match *self {
            // Enum values encode as int32 varints.
            Ty::Enumeration(..) => Ident::new("int32", Span::call_site()),
            _ => Ident::new(self.as_str(), Span::call_site()),
        }
    }
    /// Returns false if the scalar type is length delimited (i.e., `string` or `bytes`).
    pub fn is_numeric(&self) -> bool {
        !matches!(self, Ty::String(..) | Ty::Bytes(..))
    }
}
impl fmt::Debug for Ty {
    /// Debug output is the protobuf type keyword.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
impl fmt::Display for Ty {
    /// Display output matches Debug: the protobuf type keyword.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The cardinality of a scalar field and, where applicable, its default value.
#[derive(Clone)]
pub enum Kind {
    /// A plain proto3 scalar field.
    Plain(DefaultValue),
    /// An optional scalar field.
    Optional(DefaultValue),
    /// A required proto2 scalar field.
    Required(DefaultValue),
    /// A repeated scalar field.
    Repeated,
    /// A packed repeated scalar field.
    Packed,
}
/// Scalar Protobuf field default value.
#[derive(Clone, Debug)]
pub enum DefaultValue {
    F64(f64),
    F32(f32),
    I32(i32),
    I64(i64),
    U32(u32),
    U64(u64),
    Bool(bool),
    String(String),
    Bytes(Vec<u8>),
    /// Tokens naming an enum variant, e.g. `path::To::Enum::Variant`.
    Enumeration(TokenStream),
    /// A path expression (used for special float constants such as
    /// `::core::f32::INFINITY`).
    Path(Path),
}
impl DefaultValue {
pub fn from_attr(attr: &Meta) -> Result<Option<Lit>, Error> {
if !attr.path().is_ident("default") {
Ok(None)
} else if let Meta::NameValue(MetaNameValue {
value: Expr::Lit(ExprLit { ref lit, .. }),
..
}) = *attr
{
Ok(Some(lit.clone()))
} else {
bail!("invalid default value attribute: {:?}", attr)
}
}
/// Parses a `#[prost(default = ...)]` literal into a `DefaultValue` matching
/// the scalar type `ty`.
///
/// Accepts plain integer/float/bool/string/byte-string literals, enumeration
/// variant names given as strings, the special float strings `inf`/`-inf`/
/// `nan`, and negative numbers (which are not literals in Rust and therefore
/// must be re-parsed by hand from a string). Returns an error for any literal
/// that does not fit `ty`.
pub fn from_lit(ty: &Ty, lit: Lit) -> Result<DefaultValue, Error> {
    // Group the varint/fixed wire representations by their Rust scalar type.
    let is_i32 = *ty == Ty::Int32 || *ty == Ty::Sint32 || *ty == Ty::Sfixed32;
    let is_i64 = *ty == Ty::Int64 || *ty == Ty::Sint64 || *ty == Ty::Sfixed64;
    let is_u32 = *ty == Ty::Uint32 || *ty == Ty::Fixed32;
    let is_u64 = *ty == Ty::Uint64 || *ty == Ty::Fixed64;

    // A literal suffix is accepted only when it names the expected type
    // (e.g. `1i32` for an int32 field) or is absent entirely.
    let empty_or_is = |expected, actual: &str| expected == actual || actual.is_empty();

    let default = match lit {
        Lit::Int(ref lit) if is_i32 && empty_or_is("i32", lit.suffix()) => {
            DefaultValue::I32(lit.base10_parse()?)
        }
        Lit::Int(ref lit) if is_i64 && empty_or_is("i64", lit.suffix()) => {
            DefaultValue::I64(lit.base10_parse()?)
        }
        Lit::Int(ref lit) if is_u32 && empty_or_is("u32", lit.suffix()) => {
            DefaultValue::U32(lit.base10_parse()?)
        }
        Lit::Int(ref lit) if is_u64 && empty_or_is("u64", lit.suffix()) => {
            DefaultValue::U64(lit.base10_parse()?)
        }
        Lit::Float(ref lit) if *ty == Ty::Float && empty_or_is("f32", lit.suffix()) => {
            DefaultValue::F32(lit.base10_parse()?)
        }
        // An integer literal (e.g. `default = "3"`) is accepted for float fields.
        Lit::Int(ref lit) if *ty == Ty::Float => DefaultValue::F32(lit.base10_parse()?),
        Lit::Float(ref lit) if *ty == Ty::Double && empty_or_is("f64", lit.suffix()) => {
            DefaultValue::F64(lit.base10_parse()?)
        }
        Lit::Int(ref lit) if *ty == Ty::Double => DefaultValue::F64(lit.base10_parse()?),
        Lit::Bool(ref lit) if *ty == Ty::Bool => DefaultValue::Bool(lit.value),
        Lit::Str(ref lit)
            if *ty == Ty::String(StringTy::Bytes) || *ty == Ty::String(StringTy::Vec) =>
        {
            DefaultValue::String(lit.value())
        }
        Lit::ByteStr(ref lit)
            if *ty == Ty::Bytes(BytesTy::Bytes) || *ty == Ty::Bytes(BytesTy::Vec) =>
        {
            DefaultValue::Bytes(lit.value())
        }
        // A string literal for a non-string field: it may name an enum variant,
        // a special float value, a negative number, or another literal that was
        // quoted in the attribute (e.g. `default = "1"` on an int field).
        Lit::Str(ref lit) => {
            let value = lit.value();
            let value = value.trim();

            if let Ty::Enumeration(ref path) = *ty {
                let variant = Ident::new(value, Span::call_site());
                return Ok(DefaultValue::Enumeration(quote!(#path::#variant)));
            }

            // Parse special floating point values.
            if *ty == Ty::Float {
                match value {
                    "inf" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>(
                            "::core::f32::INFINITY",
                        )?));
                    }
                    "-inf" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>(
                            "::core::f32::NEG_INFINITY",
                        )?));
                    }
                    "nan" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>("::core::f32::NAN")?));
                    }
                    _ => (),
                }
            }
            if *ty == Ty::Double {
                match value {
                    "inf" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>(
                            "::core::f64::INFINITY",
                        )?));
                    }
                    "-inf" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>(
                            "::core::f64::NEG_INFINITY",
                        )?));
                    }
                    "nan" => {
                        return Ok(DefaultValue::Path(parse_str::<Path>("::core::f64::NAN")?));
                    }
                    _ => (),
                }
            }

            // Rust doesn't have a negative literals, so they have to be parsed specially.
            if let Some(Ok(lit)) = value.strip_prefix('-').map(syn::parse_str::<Lit>) {
                match lit {
                    Lit::Int(ref lit) if is_i32 && empty_or_is("i32", lit.suffix()) => {
                        // Initially parse into an i64, so that i32::MIN does not overflow.
                        let value: i64 = -lit.base10_parse()?;
                        return Ok(i32::try_from(value).map(DefaultValue::I32)?);
                    }
                    Lit::Int(ref lit) if is_i64 && empty_or_is("i64", lit.suffix()) => {
                        // Initially parse into an i128, so that i64::MIN does not overflow.
                        let value: i128 = -lit.base10_parse()?;
                        return Ok(i64::try_from(value).map(DefaultValue::I64)?);
                    }
                    Lit::Float(ref lit)
                        if *ty == Ty::Float && empty_or_is("f32", lit.suffix()) =>
                    {
                        return Ok(DefaultValue::F32(-lit.base10_parse()?));
                    }
                    Lit::Float(ref lit)
                        if *ty == Ty::Double && empty_or_is("f64", lit.suffix()) =>
                    {
                        return Ok(DefaultValue::F64(-lit.base10_parse()?));
                    }
                    Lit::Int(ref lit) if *ty == Ty::Float && lit.suffix().is_empty() => {
                        return Ok(DefaultValue::F32(-lit.base10_parse()?));
                    }
                    Lit::Int(ref lit) if *ty == Ty::Double && lit.suffix().is_empty() => {
                        return Ok(DefaultValue::F64(-lit.base10_parse()?));
                    }
                    _ => (),
                }
            }

            // Fall back: re-parse the string contents as a literal and recurse,
            // except when it parses to another string (which would loop forever).
            match syn::parse_str::<Lit>(value) {
                Ok(Lit::Str(_)) => (),
                Ok(lit) => return DefaultValue::from_lit(ty, lit),
                _ => (),
            }
            bail!("invalid default value: {}", quote!(#value));
        }
        _ => bail!("invalid default value: {}", quote!(#lit)),
    };

    Ok(default)
}
/// Returns the protobuf zero value for the scalar type `ty`.
pub fn new(ty: &Ty) -> DefaultValue {
    match *ty {
        // Integral types default to zero.
        Ty::Int32 | Ty::Sint32 | Ty::Sfixed32 => DefaultValue::I32(0),
        Ty::Int64 | Ty::Sint64 | Ty::Sfixed64 => DefaultValue::I64(0),
        Ty::Uint32 | Ty::Fixed32 => DefaultValue::U32(0),
        Ty::Uint64 | Ty::Fixed64 => DefaultValue::U64(0),
        // Floating point types default to zero.
        Ty::Float => DefaultValue::F32(0.0),
        Ty::Double => DefaultValue::F64(0.0),
        // Everything else defaults to its natural empty/false value.
        Ty::Bool => DefaultValue::Bool(false),
        Ty::String(..) => DefaultValue::String(String::new()),
        Ty::Bytes(..) => DefaultValue::Bytes(Vec::new()),
        // Enumerations defer to the enum's own `Default` impl.
        Ty::Enumeration(ref path) => DefaultValue::Enumeration(quote!(#path::default())),
    }
}
/// Returns a token stream producing an *owned* value of this default,
/// suitable for initializing a field.
///
/// Empty strings/bytes use `Default::default()` so no literal is emitted.
pub fn owned(&self) -> TokenStream {
    match *self {
        DefaultValue::String(ref value) => {
            if value.is_empty() {
                quote!(::core::default::Default::default())
            } else {
                quote!(#value.into())
            }
        }
        DefaultValue::Bytes(ref value) => {
            if value.is_empty() {
                quote!(::core::default::Default::default())
            } else {
                let lit = LitByteStr::new(value, Span::call_site());
                quote!(#lit.as_ref().into())
            }
        }
        // Scalar defaults are already owned values; reuse the typed form.
        ref other => other.typed(),
    }
}
/// Returns the default value as tokens, casting enumeration variants to
/// their underlying `i32` representation.
pub fn typed(&self) -> TokenStream {
    match self {
        DefaultValue::Enumeration(_) => quote!(#self as i32),
        _ => quote!(#self),
    }
}
}
/// Emits the raw literal tokens for each default value variant.
impl ToTokens for DefaultValue {
    fn to_tokens(&self, tokens: &mut TokenStream) {
        match self {
            DefaultValue::F64(value) => value.to_tokens(tokens),
            DefaultValue::F32(value) => value.to_tokens(tokens),
            DefaultValue::I32(value) => value.to_tokens(tokens),
            DefaultValue::I64(value) => value.to_tokens(tokens),
            DefaultValue::U32(value) => value.to_tokens(tokens),
            DefaultValue::U64(value) => value.to_tokens(tokens),
            DefaultValue::Bool(value) => value.to_tokens(tokens),
            DefaultValue::String(value) => value.to_tokens(tokens),
            DefaultValue::Bytes(value) => {
                // A byte-string literal coerced to a slice so the emitted
                // expression has type `&[u8]`.
                let byte_str = LitByteStr::new(value, Span::call_site());
                tokens.append_all(quote!(#byte_str as &[u8]));
            }
            DefaultValue::Enumeration(value) => value.to_tokens(tokens),
            DefaultValue::Path(value) => value.to_tokens(tokens),
        }
    }
}

View File

@@ -0,0 +1,691 @@
#![doc(html_root_url = "https://docs.rs/prost-derive/0.14.1")]
// The `quote!` macro requires deep recursion.
#![recursion_limit = "4096"]
extern crate alloc;
extern crate proc_macro;
use anyhow::{bail, Context, Error};
use itertools::Itertools;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
punctuated::Punctuated, Data, DataEnum, DataStruct, DeriveInput, Expr, ExprLit, Fields,
FieldsNamed, FieldsUnnamed, Ident, Index, Variant,
};
use syn::{Attribute, Lit, Meta, MetaNameValue, Path, Token};
mod field;
use crate::field::Field;
use self::field::set_option;
/// Expands `#[derive(Message)]` for a struct.
///
/// Generates the `prost::Message` impl (encode/merge/len/clear), a `Default`
/// impl, per-field helper methods, and — unless `skip_debug` is set — a
/// `Debug` impl that prints fields in declaration order.
fn try_message(input: TokenStream) -> Result<TokenStream, Error> {
    let input: DeriveInput = syn::parse2(input)?;

    let ident = input.ident;

    let Attributes {
        skip_debug,
        prost_path,
    } = Attributes::new(input.attrs)?;

    let variant_data = match input.data {
        Data::Struct(variant_data) => variant_data,
        Data::Enum(..) => bail!("Message can not be derived for an enum"),
        Data::Union(..) => bail!("Message can not be derived for a union"),
    };

    let generics = &input.generics;
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();

    // `is_struct` distinguishes named-field structs from tuple/unit structs;
    // it controls the shape of the generated Default and Debug code below.
    let (is_struct, fields) = match variant_data {
        DataStruct {
            fields: Fields::Named(FieldsNamed { named: fields, .. }),
            ..
        } => (true, fields.into_iter().collect()),
        DataStruct {
            fields:
                Fields::Unnamed(FieldsUnnamed {
                    unnamed: fields, ..
                }),
            ..
        } => (false, fields.into_iter().collect()),
        DataStruct {
            fields: Fields::Unit,
            ..
        } => (false, Vec::new()),
    };

    // Fields without an explicit tag get sequential tags, continuing after
    // the highest tag seen so far.
    let mut next_tag: u32 = 1;
    let mut fields = fields
        .into_iter()
        .enumerate()
        .flat_map(|(i, field)| {
            // Tuple-struct fields are addressed by index (`self.0`, `self.1`, ...).
            let field_ident = field.ident.map(|x| quote!(#x)).unwrap_or_else(|| {
                let index = Index {
                    index: i as u32,
                    span: Span::call_site(),
                };
                quote!(#index)
            });
            match Field::new(field.attrs, Some(next_tag)) {
                Ok(Some(field)) => {
                    next_tag = field.tags().iter().max().map(|t| t + 1).unwrap_or(next_tag);
                    Some(Ok((field_ident, field)))
                }
                // `Ok(None)` means the field is explicitly skipped by its attributes.
                Ok(None) => None,
                Err(err) => Some(Err(
                    err.context(format!("invalid message field {ident}.{field_ident}"))
                )),
            }
        })
        .collect::<Result<Vec<_>, _>>()?;

    // We want Debug to be in declaration order
    let unsorted_fields = fields.clone();

    // Sort the fields by tag number so that fields will be encoded in tag order.
    // TODO: This encodes oneof fields in the position of their lowest tag,
    // regardless of the currently occupied variant, is that consequential?
    // See: https://developers.google.com/protocol-buffers/docs/encoding#order
    fields.sort_by_key(|(_, field)| field.tags().into_iter().min().unwrap());
    let fields = fields;

    // Tags must be unique across the whole message, including oneof members.
    if let Some(duplicate_tag) = fields
        .iter()
        .flat_map(|(_, field)| field.tags())
        .duplicates()
        .next()
    {
        bail!(
            "message {} has multiple fields with tag {}",
            ident,
            duplicate_tag
        )
    };

    let encoded_len = fields
        .iter()
        .map(|(field_ident, field)| field.encoded_len(&prost_path, quote!(self.#field_ident)));

    let encode = fields
        .iter()
        .map(|(field_ident, field)| field.encode(&prost_path, quote!(self.#field_ident)));

    // One match arm per field; a oneof field matches on any of its tags.
    let merge = fields.iter().map(|(field_ident, field)| {
        let merge = field.merge(&prost_path, quote!(value));
        let tags = field.tags().into_iter().map(|tag| quote!(#tag));
        let tags = Itertools::intersperse(tags, quote!(|));

        quote! {
            #(#tags)* => {
                let mut value = &mut self.#field_ident;
                #merge.map_err(|mut error| {
                    error.push(STRUCT_NAME, stringify!(#field_ident));
                    error
                })
            },
        }
    });

    // STRUCT_NAME is only referenced by the merge arms, so omit it for
    // field-less messages to avoid an unused-constant warning.
    let struct_name = if fields.is_empty() {
        quote!()
    } else {
        quote!(
            const STRUCT_NAME: &'static str = stringify!(#ident);
        )
    };

    let clear = fields
        .iter()
        .map(|(field_ident, field)| field.clear(quote!(self.#field_ident)));

    let default = if is_struct {
        let default = fields.iter().map(|(field_ident, field)| {
            let value = field.default(&prost_path);
            quote!(#field_ident: #value,)
        });
        quote! {#ident {
            #(#default)*
        }}
    } else {
        let default = fields.iter().map(|(_, field)| {
            let value = field.default(&prost_path);
            quote!(#value,)
        });
        quote! {#ident (
            #(#default)*
        )}
    };

    let methods = fields
        .iter()
        .flat_map(|(field_ident, field)| field.methods(&prost_path, field_ident))
        .collect::<Vec<_>>();
    let methods = if methods.is_empty() {
        quote!()
    } else {
        quote! {
            #[allow(dead_code)]
            impl #impl_generics #ident #ty_generics #where_clause {
                #(#methods)*
            }
        }
    };

    let expanded = quote! {
        impl #impl_generics #prost_path::Message for #ident #ty_generics #where_clause {
            #[allow(unused_variables)]
            fn encode_raw(&self, buf: &mut impl #prost_path::bytes::BufMut) {
                #(#encode)*
            }

            #[allow(unused_variables)]
            fn merge_field(
                &mut self,
                number: ::core::num::NonZeroU32,
                wire_type: #prost_path::encoding::wire_type::WireType,
                buf: &mut impl #prost_path::bytes::Buf,
                ctx: #prost_path::encoding::DecodeContext,
            ) -> ::core::result::Result<(), #prost_path::DecodeError>
            {
                #struct_name
                match number.get() {
                    #(#merge)*
                    // Unknown field numbers are skipped, not rejected.
                    _ => #prost_path::encoding::skip_field(wire_type, number, buf, ctx),
                }
            }

            #[inline]
            fn encoded_len(&self) -> usize {
                0 #(+ #encoded_len)*
            }

            fn clear(&mut self) {
                #(#clear;)*
            }
        }

        impl #impl_generics ::core::default::Default for #ident #ty_generics #where_clause {
            fn default() -> Self {
                #default
            }
        }
    };

    let expanded = if skip_debug {
        expanded
    } else {
        // Debug uses the declaration-ordered field list; each field value is
        // wrapped by a field-kind-specific debug adapter.
        let debugs = unsorted_fields.iter().map(|(field_ident, field)| {
            let wrapper = field.debug(&prost_path, quote!(self.#field_ident));
            let call = if is_struct {
                quote!(builder.field(stringify!(#field_ident), &wrapper))
            } else {
                quote!(builder.field(&wrapper))
            };
            quote! {
                let builder = {
                    let wrapper = #wrapper;
                    #call
                };
            }
        });
        let debug_builder = if is_struct {
            quote!(f.debug_struct(stringify!(#ident)))
        } else {
            quote!(f.debug_tuple(stringify!(#ident)))
        };
        quote! {
            #expanded

            impl #impl_generics ::core::fmt::Debug for #ident #ty_generics #where_clause {
                fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                    let mut builder = #debug_builder;
                    #(#debugs;)*
                    builder.finish()
                }
            }
        }
    };

    let expanded = quote! {
        #expanded

        #methods
    };

    Ok(expanded)
}
#[proc_macro_derive(Message, attributes(prost))]
pub fn message(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
try_message(input.into()).unwrap().into()
}
/// Expands `#[derive(Enumeration)]` for a fieldless enum whose variants all
/// carry explicit discriminants.
///
/// Generates `is_valid`/`from_i32` helpers plus `Default`, `From<Self> for
/// i32`, and `TryFrom<i32>` impls. The first variant becomes the default.
fn try_enumeration(input: TokenStream) -> Result<TokenStream, Error> {
    let input: DeriveInput = syn::parse2(input)?;
    let ident = input.ident;

    let Attributes { prost_path, .. } = Attributes::new(input.attrs)?;

    let generics = &input.generics;
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();

    let punctuated_variants = match input.data {
        Data::Enum(DataEnum { variants, .. }) => variants,
        Data::Struct(_) => bail!("Enumeration can not be derived for a struct"),
        Data::Union(..) => bail!("Enumeration can not be derived for a union"),
    };

    // Map the variants into 'fields'.
    let mut variants: Vec<(Ident, Expr, Option<TokenStream>)> = Vec::new();
    for Variant {
        attrs,
        ident,
        fields,
        discriminant,
        ..
    } in punctuated_variants
    {
        match fields {
            Fields::Unit => (),
            Fields::Named(_) | Fields::Unnamed(_) => {
                bail!("Enumeration variants may not have fields")
            }
        }

        match discriminant {
            Some((_, expr)) => {
                // Carry `#[allow(deprecated)]` into generated code that names a
                // `#[deprecated]` variant, so the expansion compiles warning-free.
                let deprecated_attr = if attrs.iter().any(|v| v.path().is_ident("deprecated")) {
                    Some(quote!(#[allow(deprecated)]))
                } else {
                    None
                };
                variants.push((ident, expr, deprecated_attr))
            }
            None => bail!("Enumeration variants must have a discriminant"),
        }
    }

    // Fix: report this through the error channel like every other validation
    // in this function, instead of panicking inside the proc macro.
    if variants.is_empty() {
        bail!("Enumeration must have at least one variant");
    }

    // The first declared variant is the protobuf default.
    let (default, _, default_deprecated) = variants[0].clone();

    let is_valid = variants.iter().map(|(_, value, _)| quote!(#value => true));
    let from = variants
        .iter()
        .map(|(variant, value, deprecated)| quote!(#value => ::core::option::Option::Some(#deprecated #ident::#variant)));
    let try_from = variants
        .iter()
        .map(|(variant, value, deprecated)| quote!(#value => ::core::result::Result::Ok(#deprecated #ident::#variant)));

    let is_valid_doc = format!("Returns `true` if `value` is a variant of `{ident}`.");
    let from_i32_doc =
        format!("Converts an `i32` to a `{ident}`, or `None` if `value` is not a valid variant.");

    let expanded = quote! {
        impl #impl_generics #ident #ty_generics #where_clause {
            #[doc=#is_valid_doc]
            pub fn is_valid(value: i32) -> bool {
                match value {
                    #(#is_valid,)*
                    _ => false,
                }
            }

            #[deprecated = "Use the TryFrom<i32> implementation instead"]
            #[doc=#from_i32_doc]
            pub fn from_i32(value: i32) -> ::core::option::Option<#ident> {
                match value {
                    #(#from,)*
                    _ => ::core::option::Option::None,
                }
            }
        }

        impl #impl_generics ::core::default::Default for #ident #ty_generics #where_clause {
            fn default() -> #ident {
                #default_deprecated #ident::#default
            }
        }

        impl #impl_generics ::core::convert::From::<#ident> for i32 #ty_generics #where_clause {
            fn from(value: #ident) -> i32 {
                value as i32
            }
        }

        impl #impl_generics ::core::convert::TryFrom::<i32> for #ident #ty_generics #where_clause {
            type Error = #prost_path::UnknownEnumValue;

            fn try_from(value: i32) -> ::core::result::Result<#ident, #prost_path::UnknownEnumValue> {
                match value {
                    #(#try_from,)*
                    _ => ::core::result::Result::Err(#prost_path::UnknownEnumValue(value)),
                }
            }
        }
    };

    Ok(expanded)
}
#[proc_macro_derive(Enumeration, attributes(prost))]
pub fn enumeration(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
try_enumeration(input.into()).unwrap().into()
}
/// Expands `#[derive(Oneof)]` for an enum whose variants each wrap exactly
/// one field.
///
/// Generates inherent `encode`/`merge`/`encoded_len` methods and — unless
/// `skip_debug` is set — a `Debug` impl.
fn try_oneof(input: TokenStream) -> Result<TokenStream, Error> {
    let input: DeriveInput = syn::parse2(input)?;

    let ident = input.ident;

    let Attributes {
        skip_debug,
        prost_path,
    } = Attributes::new(input.attrs)?;

    let variants = match input.data {
        Data::Enum(DataEnum { variants, .. }) => variants,
        Data::Struct(..) => bail!("Oneof can not be derived for a struct"),
        Data::Union(..) => bail!("Oneof can not be derived for a union"),
    };

    let generics = &input.generics;
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();

    // Map the variants into 'fields'.
    let mut fields: Vec<(Ident, Field, Option<TokenStream>)> = Vec::new();
    for Variant {
        attrs,
        ident: variant_ident,
        fields: variant_fields,
        ..
    } in variants
    {
        let variant_fields = match variant_fields {
            Fields::Unit => Punctuated::new(),
            Fields::Named(FieldsNamed { named: fields, .. })
            | Fields::Unnamed(FieldsUnnamed {
                unnamed: fields, ..
            }) => fields,
        };
        if variant_fields.len() != 1 {
            bail!("Oneof enum variants must have a single field");
        }
        // Carry `#[allow(deprecated)]` into generated code that names a
        // `#[deprecated]` variant, so the expansion compiles warning-free.
        let deprecated_attr = if attrs.iter().any(|v| v.path().is_ident("deprecated")) {
            Some(quote!(#[allow(deprecated)]))
        } else {
            None
        };
        match Field::new_oneof(attrs)? {
            Some(field) => fields.push((variant_ident, field, deprecated_attr)),
            None => bail!("invalid oneof variant: oneof variants may not be ignored"),
        }
    }

    // Oneof variants cannot be oneofs themselves, so it's impossible to have a field with multiple
    // tags.
    assert!(fields.iter().all(|(_, field, _)| field.tags().len() == 1));

    if let Some(duplicate_tag) = fields
        .iter()
        .flat_map(|(_, field, _)| field.tags())
        .duplicates()
        .next()
    {
        bail!(
            "invalid oneof {}: multiple variants have tag {}",
            ident,
            duplicate_tag
        );
    }

    let encode = fields.iter().map(|(variant_ident, field, deprecated)| {
        let encode = field.encode(&prost_path, quote!(*value));
        quote!(#deprecated #ident::#variant_ident(ref value) => { #encode })
    });

    // Merge either updates the currently-occupied variant in place, or decodes
    // into a fresh default value and swaps it into `field`.
    let merge = fields.iter().map(|(variant_ident, field, deprecated)| {
        let tag = field.tags()[0];
        let merge = field.merge(&prost_path, quote!(value));
        quote! {
            #deprecated
            #tag => if let ::core::option::Option::Some(#ident::#variant_ident(value)) = field {
                #merge
            } else {
                let mut owned_value = ::core::default::Default::default();
                let value = &mut owned_value;
                #merge.map(|_| *field = ::core::option::Option::Some(#deprecated #ident::#variant_ident(owned_value)))
            }
        }
    });

    let encoded_len = fields.iter().map(|(variant_ident, field, deprecated)| {
        let encoded_len = field.encoded_len(&prost_path, quote!(*value));
        quote!(#deprecated #ident::#variant_ident(ref value) => #encoded_len)
    });

    let expanded = quote! {
        impl #impl_generics #ident #ty_generics #where_clause {
            /// Encodes the message to a buffer.
            pub fn encode(&self, buf: &mut impl #prost_path::bytes::BufMut) {
                match *self {
                    #(#encode,)*
                }
            }

            /// Decodes an instance of the message from a buffer, and merges it into self.
            pub fn merge(
                field: &mut ::core::option::Option<#ident #ty_generics>,
                number: ::core::num::NonZeroU32,
                wire_type: #prost_path::encoding::wire_type::WireType,
                buf: &mut impl #prost_path::bytes::Buf,
                ctx: #prost_path::encoding::DecodeContext,
            ) -> ::core::result::Result<(), #prost_path::DecodeError>
            {
                match number.get() {
                    #(#merge,)*
                    // The enclosing message's merge_field only dispatches known
                    // oneof tags here, so any other tag is a codegen bug.
                    _ => unreachable!(concat!("invalid ", stringify!(#ident), " tag: {}"), number),
                }
            }

            /// Returns the encoded length of the message without a length delimiter.
            #[inline]
            pub fn encoded_len(&self) -> usize {
                match *self {
                    #(#encoded_len,)*
                }
            }
        }
    };

    let expanded = if skip_debug {
        expanded
    } else {
        let debug = fields.iter().map(|(variant_ident, field, deprecated)| {
            let wrapper = field.debug(&prost_path, quote!(*value));
            quote!(#deprecated #ident::#variant_ident(ref value) => {
                let wrapper = #wrapper;
                f.debug_tuple(stringify!(#variant_ident))
                    .field(&wrapper)
                    .finish()
            })
        });
        quote! {
            #expanded

            impl #impl_generics ::core::fmt::Debug for #ident #ty_generics #where_clause {
                fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                    match *self {
                        #(#debug,)*
                    }
                }
            }
        }
    };

    Ok(expanded)
}
#[proc_macro_derive(Oneof, attributes(prost))]
pub fn oneof(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
try_oneof(input.into()).unwrap().into()
}
/// Get the items belonging to the 'prost' list attribute, e.g. `#[prost(foo, bar="baz")]`.
fn prost_attrs(attrs: Vec<Attribute>) -> Result<Vec<Meta>, Error> {
    let mut metas = Vec::new();
    for attr in &attrs {
        // Only `#[prost(...)]` list attributes contribute items.
        let Meta::List(meta_list) = &attr.meta else {
            continue;
        };
        if !meta_list.path.is_ident("prost") {
            continue;
        }
        metas.extend(
            meta_list.parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)?,
        );
    }
    Ok(metas)
}
/// Extracts the path to prost specified using the `#[prost(prost_path = "...")]` attribute. When
/// missing, falls back to default, which is `::prost`.
fn get_prost_path(attrs: &[Meta]) -> Result<Path, Error> {
    let mut found: Option<Path> = None;
    for meta in attrs {
        // Only `prost_path = "..."` name-value pairs with a string literal count.
        if let Meta::NameValue(MetaNameValue {
            path,
            value:
                Expr::Lit(ExprLit {
                    lit: Lit::Str(lit), ..
                }),
            ..
        }) = meta
        {
            if path.is_ident("prost_path") {
                let parsed: Path =
                    syn::parse_str(&lit.value()).context("invalid prost_path argument")?;
                // Specifying the attribute twice is an error.
                set_option(&mut found, parsed, "duplicate prost_path attributes")?;
            }
        }
    }
    Ok(found.unwrap_or_else(|| syn::parse_str("::prost").expect("default prost_path")))
}
/// Container-level `#[prost(...)]` options shared by all three derives.
struct Attributes {
    // When set, no `Debug` impl is generated for the type.
    skip_debug: bool,
    // Path used to reference the prost crate in generated code (default `::prost`).
    prost_path: Path,
}
impl Attributes {
    /// Collects the container-level prost options from a derive input's attributes.
    fn new(attrs: Vec<Attribute>) -> Result<Self, Error> {
        syn::custom_keyword!(skip_debug);
        // `skip_debug` is detected on any attribute whose arguments parse as
        // exactly that keyword (e.g. `#[prost(skip_debug)]`).
        let skip_debug = attrs
            .iter()
            .any(|attr| attr.parse_args::<skip_debug>().is_ok());
        let metas = prost_attrs(attrs)?;
        let prost_path = get_prost_path(&metas)?;
        Ok(Self {
            skip_debug,
            prost_path,
        })
    }
}
#[cfg(test)]
mod test {
    use crate::{try_message, try_oneof};
    use quote::quote;

    // A plain field's tag must not collide with any tag claimed by a oneof.
    #[test]
    fn test_rejects_colliding_message_fields() {
        let output = try_message(quote!(
            struct Invalid {
                #[prost(bool, tag = "1")]
                a: bool,
                #[prost(oneof = "super::Whatever", tags = "4, 5, 1")]
                b: Option<super::Whatever>,
            }
        ));
        assert_eq!(
            output
                .expect_err("did not reject colliding message fields")
                .to_string(),
            "message Invalid has multiple fields with tag 1"
        );
    }

    // Two oneof variants may not share a tag.
    #[test]
    fn test_rejects_colliding_oneof_variants() {
        let output = try_oneof(quote!(
            pub enum Invalid {
                #[prost(bool, tag = "1")]
                A(bool),
                #[prost(bool, tag = "3")]
                B(bool),
                #[prost(bool, tag = "1")]
                C(bool),
            }
        ));
        assert_eq!(
            output
                .expect_err("did not reject colliding oneof variants")
                .to_string(),
            "invalid oneof Invalid: multiple variants have tag 1"
        );
    }

    // A single oneof variant may carry only one tag, however it is spelled:
    // duplicated `tag` keys, repeated `#[prost]` attributes, or a `tags` list.
    #[test]
    fn test_rejects_multiple_tags_oneof_variant() {
        let output = try_oneof(quote!(
            enum What {
                #[prost(bool, tag = "1", tag = "2")]
                A(bool),
            }
        ));
        assert_eq!(
            output
                .expect_err("did not reject multiple tags on oneof variant")
                .to_string(),
            "duplicate tag attributes: 1 and 2"
        );

        let output = try_oneof(quote!(
            enum What {
                #[prost(bool, tag = "3")]
                #[prost(tag = "4")]
                A(bool),
            }
        ));
        assert!(output.is_err());
        assert_eq!(
            output
                .expect_err("did not reject multiple tags on oneof variant")
                .to_string(),
            "duplicate tag attributes: 3 and 4"
        );

        let output = try_oneof(quote!(
            enum What {
                #[prost(bool, tags = "5,6")]
                A(bool),
            }
        ));
        assert!(output.is_err());
        assert_eq!(
            output
                .expect_err("did not reject multiple tags on oneof variant")
                .to_string(),
            "unknown attribute(s): #[prost(tags = \"5,6\")]"
        );
    }
}

View File

@@ -0,0 +1,34 @@
[package]
name = "prost-types"
readme = "README.md"
description = "Prost definitions of Protocol Buffers well known types."
version = "0.14.1"
authors = [
"Dan Burkert <dan@danburkert.com>",
"Lucio Franco <luciofranco14@gmail.com>",
"Casper Meijn <casper@meijn.net>",
"Tokio Contributors <team@tokio.rs>",
]
license = "Apache-2.0"
repository = "https://github.com/tokio-rs/prost"
edition = "2021"
rust-version = "1.71.1"
[lib]
doctest = false
[features]
default = ["std"]
std = ["prost/std"]
arbitrary = ["dep:arbitrary"]
[dependencies]
prost = { version = "0.14.1", path = "../prost-0.14.1", default-features = false, features = ["derive"] }
arbitrary = { version = "1.4", features = ["derive"], optional = true }
chrono = { version = "0.4.34", default-features = false, optional = true }
[dev-dependencies]
proptest = "1"
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }

29
patch/prost-types/Cargo.toml.orig generated Normal file
View File

@@ -0,0 +1,29 @@
[package]
name = "prost-types"
readme = "README.md"
description = "Prost definitions of Protocol Buffers well known types."
version.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true
[lib]
doctest = false
[features]
default = ["std"]
std = ["prost/std"]
arbitrary = ["dep:arbitrary"]
[dependencies]
prost = { version = "0.14.1", path = "../prost", default-features = false, features = ["derive"] }
arbitrary = { version = "1.4", features = ["derive"], optional = true }
chrono = { version = "0.4.34", default-features = false, optional = true }
[dev-dependencies]
proptest = "1"
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }

View File

View File

@@ -0,0 +1,21 @@
[![Documentation](https://docs.rs/prost-types/badge.svg)](https://docs.rs/prost-types/)
[![Crate](https://img.shields.io/crates/v/prost-types.svg)](https://crates.io/crates/prost-types)
# `prost-types`
Prost definitions of Protocol Buffers well known types. See the [Protobuf reference][1] for more
information about well known types.
[1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf
## License
`prost-types` is distributed under the terms of the Apache License (Version 2.0).
`prost-types` includes code imported from the Protocol Buffers project, which is
included under its original ([BSD][2]) license.
[2]: https://github.com/google/protobuf/blob/master/LICENSE
See [LICENSE](./LICENSE) for details.
Copyright 2017 Dan Burkert

View File

@@ -0,0 +1,69 @@
use super::*;
impl Any {
    /// Serialize the given message type `M` as [`Any`].
    pub fn from_msg<M>(msg: &M) -> Result<Self, EncodeError>
    where
        M: Name,
    {
        let type_url = M::type_url();
        let mut value = Vec::new();
        Message::encode(msg, &mut value)?;
        Ok(Any { type_url, value })
    }

    /// Decode the given message type `M` from [`Any`], validating that it has
    /// the expected type URL.
    pub fn to_msg<M>(&self) -> Result<M, DecodeError>
    where
        M: Default + Name + Sized,
    {
        let expected_type_url = M::type_url();
        let expected = TypeUrl::new(&expected_type_url);
        let actual = TypeUrl::new(&self.type_url);
        match (expected, actual) {
            // Both URLs parsed and agree: decode the payload as `M`.
            (Some(e), Some(a)) if e == a => M::decode(self.value.as_slice()),
            // Unparseable or mismatched URL: report what was expected vs. found.
            _ => {
                let mut err = DecodeError::new(format!(
                    "expected type URL: \"{}\" (got: \"{}\")",
                    expected_type_url, &self.type_url
                ));
                err.push("unexpected type URL", "type_url");
                Err(err)
            }
        }
    }
}
impl Name for Any {
    // Well-known types live in the `google.protobuf` package (the shared
    // PACKAGE constant in this module).
    const PACKAGE: &'static str = PACKAGE;
    const NAME: &'static str = "Any";

    fn type_url() -> String {
        // Delegate to the shared helper so the URL format stays uniform
        // across all well-known types.
        type_url_for::<Self>()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips a Timestamp through Any and checks URL validation.
    #[test]
    fn check_any_serialization() {
        let message = Timestamp::date(2000, 1, 1).unwrap();
        let any = Any::from_msg(&message).unwrap();
        assert_eq!(
            &any.type_url,
            "type.googleapis.com/google.protobuf.Timestamp"
        );

        let message2 = any.to_msg::<Timestamp>().unwrap();
        assert_eq!(message, message2);

        // Wrong type URL
        assert!(any.to_msg::<Duration>().is_err());
    }
}

View File

@@ -0,0 +1,175 @@
// This file is @generated by prost-build.
/// The version number of protocol compiler.
// Generated message (presumably from google/protobuf/compiler/plugin.proto —
// see the prost-build header above); do not edit by hand.
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Version {
    #[prost(int32, optional, tag = "1")]
    pub major: ::core::option::Option<i32>,
    #[prost(int32, optional, tag = "2")]
    pub minor: ::core::option::Option<i32>,
    #[prost(int32, optional, tag = "3")]
    pub patch: ::core::option::Option<i32>,
    /// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
    /// be empty for mainline stable releases.
    #[prost(string, optional, tag = "4")]
    pub suffix: ::core::option::Option<::prost::alloc::string::String>,
}
/// An encoded CodeGeneratorRequest is written to the plugin's stdin.
// Generated message; do not edit by hand.
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CodeGeneratorRequest {
    /// The .proto files that were explicitly listed on the command-line. The
    /// code generator should generate code only for these files. Each file's
    /// descriptor will be included in proto_file, below.
    #[prost(string, repeated, tag = "1")]
    pub file_to_generate: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// The generator parameter passed on the command-line.
    #[prost(string, optional, tag = "2")]
    pub parameter: ::core::option::Option<::prost::alloc::string::String>,
    /// FileDescriptorProtos for all files in files_to_generate and everything
    /// they import. The files will appear in topological order, so each file
    /// appears before any file that imports it.
    ///
    /// protoc guarantees that all proto_files will be written after
    /// the fields above, even though this is not technically guaranteed by the
    /// protobuf wire format. This theoretically could allow a plugin to stream
    /// in the FileDescriptorProtos and handle them one by one rather than read
    /// the entire set into memory at once. However, as of this writing, this
    /// is not similarly optimized on protoc's end -- it will store all fields in
    /// memory at once before sending them to the plugin.
    ///
    /// Type names of fields and extensions in the FileDescriptorProto are always
    /// fully qualified.
    #[prost(message, repeated, tag = "15")]
    pub proto_file: ::prost::alloc::vec::Vec<super::FileDescriptorProto>,
    /// The version number of protocol compiler.
    #[prost(message, optional, tag = "3")]
    pub compiler_version: ::core::option::Option<Version>,
}
/// The plugin writes an encoded CodeGeneratorResponse to stdout.
// Generated message; do not edit by hand.
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CodeGeneratorResponse {
    /// Error message. If non-empty, code generation failed. The plugin process
    /// should exit with status code zero even if it reports an error in this way.
    ///
    /// This should be used to indicate errors in .proto files which prevent the
    /// code generator from generating correct code. Errors which indicate a
    /// problem in protoc itself -- such as the input CodeGeneratorRequest being
    /// unparseable -- should be reported by writing a message to stderr and
    /// exiting with a non-zero status code.
    #[prost(string, optional, tag = "1")]
    pub error: ::core::option::Option<::prost::alloc::string::String>,
    /// A bitmask of supported features that the code generator supports.
    /// This is a bitwise "or" of values from the Feature enum.
    #[prost(uint64, optional, tag = "2")]
    pub supported_features: ::core::option::Option<u64>,
    #[prost(message, repeated, tag = "15")]
    pub file: ::prost::alloc::vec::Vec<code_generator_response::File>,
}
/// Nested message and enum types in `CodeGeneratorResponse`.
pub mod code_generator_response {
/// Represents a single generated file.
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct File {
/// The file name, relative to the output directory. The name must not
/// contain "." or ".." components and must be relative, not be absolute (so,
/// the file cannot lie outside the output directory). "/" must be used as
/// the path separator, not "".
///
/// If the name is omitted, the content will be appended to the previous
/// file. This allows the generator to break large files into small chunks,
/// and allows the generated text to be streamed back to protoc so that large
/// files need not reside completely in memory at one time. Note that as of
/// this writing protoc does not optimize for this -- it will read the entire
/// CodeGeneratorResponse before writing files to disk.
#[prost(string, optional, tag = "1")]
pub name: ::core::option::Option<::prost::alloc::string::String>,
/// If non-empty, indicates that the named file should already exist, and the
/// content here is to be inserted into that file at a defined insertion
/// point. This feature allows a code generator to extend the output
/// produced by another code generator. The original generator may provide
/// insertion points by placing special annotations in the file that look
/// like:
/// @@protoc_insertion_point(NAME)
/// The annotation can have arbitrary text before and after it on the line,
/// which allows it to be placed in a comment. NAME should be replaced with
/// an identifier naming the point -- this is what other generators will use
/// as the insertion_point. Code inserted at this point will be placed
/// immediately above the line containing the insertion point (thus multiple
/// insertions to the same point will come out in the order they were added).
/// The double-@ is intended to make it unlikely that the generated code
/// could contain things that look like insertion points by accident.
///
/// For example, the C++ code generator places the following line in the
/// .pb.h files that it generates:
/// // @@protoc_insertion_point(namespace_scope)
/// This line appears within the scope of the file's package namespace, but
/// outside of any particular class. Another plugin can then specify the
/// insertion_point "namespace_scope" to generate additional classes or
/// other declarations that should be placed in this scope.
///
/// Note that if the line containing the insertion point begins with
/// whitespace, the same whitespace will be added to every line of the
/// inserted text. This is useful for languages like Python, where
/// indentation matters. In these languages, the insertion point comment
/// should be indented the same amount as any inserted code will need to be
/// in order to work correctly in that context.
///
/// The code generator that generates the initial file and the one which
/// inserts into it must both run as part of a single invocation of protoc.
/// Code generators are executed in the order in which they appear on the
/// command line.
///
/// If |insertion_point| is present, |name| must also be present.
#[prost(string, optional, tag = "2")]
pub insertion_point: ::core::option::Option<::prost::alloc::string::String>,
/// The file contents.
#[prost(string, optional, tag = "15")]
pub content: ::core::option::Option<::prost::alloc::string::String>,
/// Information describing the file content being inserted. If an insertion
/// point is used, this information will be appropriately offset and inserted
/// into the code generation metadata for the generated files.
#[prost(message, optional, tag = "16")]
pub generated_code_info: ::core::option::Option<super::super::GeneratedCodeInfo>,
}
/// Sync with code_generator.h.
///
/// Optional features a code generator may declare support for.
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    Hash,
    PartialOrd,
    Ord,
    ::prost::Enumeration
)]
#[repr(i32)]
pub enum Feature {
    /// No optional features supported.
    None = 0,
    /// Supports proto3 `optional` fields.
    Proto3Optional = 1,
}
impl Feature {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::None => "FEATURE_NONE",
            Self::Proto3Optional => "FEATURE_PROTO3_OPTIONAL",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    ///
    /// Inverse of [`Self::as_str_name`]; yields `None` for unrecognized names.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "FEATURE_NONE" => Some(Self::None),
            "FEATURE_PROTO3_OPTIONAL" => Some(Self::Proto3Optional),
            _ => None,
        }
    }
}
}

View File

@@ -0,0 +1,62 @@
use crate::protobuf::Value;
use crate::value;
use crate::String;
use crate::Vec;
use ::prost::alloc::collections::BTreeMap;
// Wraps a concrete `value::Kind` into a `Value` message (`kind` set to `Some`).
impl From<value::Kind> for Value {
    fn from(value: value::Kind) -> Self {
        Value { kind: Some(value) }
    }
}
// Generates `From<$t> for Value` by widening the numeric type to `f64`
// (via `Into<f64>`) and wrapping it in `value::Kind::NumberValue`.
macro_rules! impl_number_value {
    ($t: ty) => {
        impl From<$t> for Value {
            fn from(value: $t) -> Self {
                value::Kind::NumberValue(value.into()).into()
            }
        }
    };
}
// Only types with a lossless `Into<f64>` conversion are listed; `u64`/`i64`
// do not implement `Into<f64>` and so cannot be covered by this macro.
impl_number_value!(u8);
impl_number_value!(u16);
impl_number_value!(u32);
impl_number_value!(i8);
impl_number_value!(i16);
impl_number_value!(i32);
impl_number_value!(f32);
impl_number_value!(f64);
// Scalar and container conversions into the protobuf `Value` union type.
impl From<bool> for Value {
    fn from(value: bool) -> Self {
        value::Kind::BoolValue(value).into()
    }
}
impl From<String> for Value {
    fn from(value: String) -> Self {
        value::Kind::StringValue(value).into()
    }
}
impl From<&str> for Value {
    fn from(value: &str) -> Self {
        value::Kind::StringValue(value.into()).into()
    }
}
// A list of `Value`s becomes a `ListValue`.
impl From<Vec<Value>> for Value {
    fn from(value: Vec<Value>) -> Self {
        value::Kind::ListValue(crate::protobuf::ListValue { values: value }).into()
    }
}
// A string-keyed map becomes a `Struct`.
impl From<BTreeMap<String, Value>> for Value {
    fn from(value: BTreeMap<String, Value>) -> Self {
        value::Kind::StructValue(crate::protobuf::Struct { fields: value }).into()
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,481 @@
use super::*;
impl Duration {
    /// Normalizes the duration to a canonical format.
    ///
    /// After normalization, `nanos` lies strictly within
    /// `(-NANOS_PER_SECOND, NANOS_PER_SECOND)` and carries the same sign as
    /// `seconds`; values whose carry would overflow `i64` are clamped to the
    /// least/greatest normal duration.
    ///
    /// Based on [`google::protobuf::util::CreateNormalized`][1].
    ///
    /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L79-L100
    pub fn normalize(&mut self) {
        // Make sure nanos is in the range.
        if self.nanos <= -NANOS_PER_SECOND || self.nanos >= NANOS_PER_SECOND {
            // Carry whole seconds out of `nanos` into `seconds`, checking for
            // i64 overflow of the carry.
            if let Some(seconds) = self
                .seconds
                .checked_add((self.nanos / NANOS_PER_SECOND) as i64)
            {
                self.seconds = seconds;
                self.nanos %= NANOS_PER_SECOND;
            } else if self.nanos < 0 {
                // Negative overflow! Set to the least normal value.
                self.seconds = i64::MIN;
                self.nanos = -NANOS_MAX;
            } else {
                // Positive overflow! Set to the greatest normal value.
                self.seconds = i64::MAX;
                self.nanos = NANOS_MAX;
            }
        }
        // nanos should have the same sign as seconds.
        if self.seconds < 0 && self.nanos > 0 {
            // Borrow one second from the positive nanos.
            if let Some(seconds) = self.seconds.checked_add(1) {
                self.seconds = seconds;
                self.nanos -= NANOS_PER_SECOND;
            } else {
                // Positive overflow! Set to the greatest normal value.
                debug_assert_eq!(self.seconds, i64::MAX);
                self.nanos = NANOS_MAX;
            }
        } else if self.seconds > 0 && self.nanos < 0 {
            // Lend one second to the negative nanos.
            if let Some(seconds) = self.seconds.checked_sub(1) {
                self.seconds = seconds;
                self.nanos += NANOS_PER_SECOND;
            } else {
                // Negative overflow! Set to the least normal value.
                debug_assert_eq!(self.seconds, i64::MIN);
                self.nanos = -NANOS_MAX;
            }
        }
        // TODO: should this be checked?
        // debug_assert!(self.seconds >= -315_576_000_000 && self.seconds <= 315_576_000_000,
        // "invalid duration: {:?}", self);
    }
    /// Returns a normalized copy of the duration to a canonical format.
    ///
    /// Based on [`google::protobuf::util::CreateNormalized`][1].
    ///
    /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L79-L100
    pub fn normalized(&self) -> Self {
        let mut result = *self;
        result.normalize();
        result
    }
}
// impl Name for Duration {
// const PACKAGE: &'static str = PACKAGE;
// const NAME: &'static str = "Duration";
// fn type_url() -> String {
// type_url_for::<Self>()
// }
// }
impl TryFrom<time::Duration> for Duration {
    type Error = DurationError;
    /// Converts a `std::time::Duration` to a `Duration`, failing if the duration is too large.
    fn try_from(value: time::Duration) -> Result<Duration, DurationError> {
        // The whole-second count must fit into an i64; the u64 -> i64
        // conversion is the only fallible step.
        let Ok(seconds) = i64::try_from(value.as_secs()) else {
            return Err(DurationError::OutOfRange);
        };
        // `subsec_nanos` is always < 1_000_000_000, so the cast is lossless.
        let converted = Duration {
            seconds,
            nanos: value.subsec_nanos() as i32,
        };
        Ok(converted.normalized())
    }
}
impl TryFrom<Duration> for time::Duration {
    type Error = DurationError;
    /// Converts a `Duration` to a `std::time::Duration`, failing if the duration is negative.
    ///
    /// On failure the returned [`DurationError::NegativeDuration`] carries the
    /// magnitude of the original negative duration.
    fn try_from(mut duration: Duration) -> Result<time::Duration, DurationError> {
        duration.normalize();
        if duration.seconds >= 0 && duration.nanos >= 0 {
            Ok(time::Duration::new(
                duration.seconds as u64,
                duration.nanos as u32,
            ))
        } else {
            // Use `unsigned_abs` rather than unary negation: `-i64::MIN`
            // (and `-i32::MIN`) overflows, panicking in debug builds and
            // wrapping in release builds.
            Err(DurationError::NegativeDuration(time::Duration::new(
                duration.seconds.unsigned_abs(),
                duration.nanos.unsigned_abs(),
            )))
        }
    }
}
impl fmt::Display for Duration {
    /// Formats the duration as decimal seconds with an `s` suffix
    /// (e.g. `3s`, `0.123s`, `-1.000000001s`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let d = self.normalized();
        // Take the sign from the normalized value: after `normalize`,
        // `seconds` and `nanos` agree in sign. Reading `self` here could
        // print a spurious "-" for inputs such as
        // `{ seconds: -1, nanos: 1_000_000_000 }`, which normalize to a
        // non-negative duration.
        if d.seconds < 0 || d.nanos < 0 {
            write!(f, "-")?;
        }
        write!(f, "{}", d.seconds.abs())?;
        // Format subseconds to either nothing, millis, micros, or nanos.
        let nanos = d.nanos.abs();
        if nanos == 0 {
            write!(f, "s")
        } else if nanos % 1_000_000 == 0 {
            write!(f, ".{:03}s", nanos / 1_000_000)
        } else if nanos % 1_000 == 0 {
            write!(f, ".{:06}s", nanos / 1_000)
        } else {
            write!(f, ".{nanos:09}s")
        }
    }
}
/// A duration handling error.
// `#[non_exhaustive]` reserves the right to add further variants without a
// semver break; downstream `match`es must include a wildcard arm.
#[derive(Debug, PartialEq)]
#[non_exhaustive]
pub enum DurationError {
    /// Indicates failure to parse a [`Duration`] from a string.
    ///
    /// The [`Duration`] string format is specified in the [Protobuf JSON mapping specification][1].
    ///
    /// [1]: https://developers.google.com/protocol-buffers/docs/proto3#json
    ParseFailure,
    /// Indicates failure to convert a `prost_types::Duration` to a `std::time::Duration` because
    /// the duration is negative. The included `std::time::Duration` matches the magnitude of the
    /// original negative `prost_types::Duration`.
    NegativeDuration(time::Duration),
    /// Indicates failure to convert a `std::time::Duration` to a `prost_types::Duration`.
    ///
    /// Converting a `std::time::Duration` to a `prost_types::Duration` fails if the magnitude
    /// exceeds that representable by `prost_types::Duration`.
    OutOfRange,
}
impl fmt::Display for DurationError {
    /// Renders a short, human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            DurationError::ParseFailure => f.write_str("failed to parse duration"),
            DurationError::NegativeDuration(ref duration) => {
                write!(f, "failed to convert negative duration: {duration:?}")
            }
            DurationError::OutOfRange => {
                f.write_str("failed to convert duration out of range")
            }
        }
    }
}
#[cfg(feature = "std")]
impl std::error::Error for DurationError {}
impl FromStr for Duration {
    type Err = DurationError;
    /// Parses a Protobuf-JSON duration string (seconds with an `s` suffix).
    fn from_str(s: &str) -> Result<Duration, DurationError> {
        match datetime::parse_duration(s) {
            Some(duration) => Ok(duration),
            None => Err(DurationError::ParseFailure),
        }
    }
}
#[cfg(feature = "chrono")]
mod chrono {
    use ::chrono::TimeDelta;
    use super::*;
    /// Converts a `chrono::TimeDelta` into a normalized protobuf `Duration`.
    impl From<::chrono::TimeDelta> for Duration {
        fn from(value: ::chrono::TimeDelta) -> Self {
            let mut result = Self {
                seconds: value.num_seconds(),
                nanos: value.subsec_nanos(),
            };
            result.normalize();
            result
        }
    }
    /// Converts a protobuf `Duration` into a `chrono::TimeDelta`, failing with
    /// [`DurationError::OutOfRange`] if the value exceeds `TimeDelta`'s range.
    impl TryFrom<Duration> for ::chrono::TimeDelta {
        type Error = DurationError;
        fn try_from(mut value: Duration) -> Result<TimeDelta, duration::DurationError> {
            value.normalize();
            // The whole-second conversion can exceed TimeDelta's range; the
            // sub-second part (< 1s after normalize) cannot, but the final
            // addition is still checked for safety.
            let seconds = TimeDelta::try_seconds(value.seconds).ok_or(DurationError::OutOfRange)?;
            let nanos = TimeDelta::nanoseconds(value.nanos.into());
            seconds.checked_add(&nanos).ok_or(DurationError::OutOfRange)
        }
    }
}
#[cfg(kani)]
mod proofs {
use super::*;
#[cfg(feature = "std")]
#[kani::proof]
fn check_duration_std_roundtrip() {
let seconds = kani::any();
let nanos = kani::any();
kani::assume(nanos < 1_000_000_000);
let std_duration = std::time::Duration::new(seconds, nanos);
let Ok(prost_duration) = Duration::try_from(std_duration) else {
// Test case not valid: duration out of range
return;
};
assert_eq!(
time::Duration::try_from(prost_duration).unwrap(),
std_duration
);
if std_duration != time::Duration::default() {
let neg_prost_duration = Duration {
seconds: -prost_duration.seconds,
nanos: -prost_duration.nanos,
};
assert!(matches!(
time::Duration::try_from(neg_prost_duration),
Err(DurationError::NegativeDuration(d)) if d == std_duration,
))
}
}
#[cfg(feature = "std")]
#[kani::proof]
fn check_duration_std_roundtrip_nanos() {
let seconds = 0;
let nanos = kani::any();
let std_duration = std::time::Duration::new(seconds, nanos);
let Ok(prost_duration) = Duration::try_from(std_duration) else {
// Test case not valid: duration out of range
return;
};
assert_eq!(
time::Duration::try_from(prost_duration).unwrap(),
std_duration
);
if std_duration != time::Duration::default() {
let neg_prost_duration = Duration {
seconds: -prost_duration.seconds,
nanos: -prost_duration.nanos,
};
assert!(matches!(
time::Duration::try_from(neg_prost_duration),
Err(DurationError::NegativeDuration(d)) if d == std_duration,
))
}
}
#[cfg(feature = "chrono")]
#[kani::proof]
fn check_duration_chrono_roundtrip() {
let seconds = kani::any();
let nanos = kani::any();
let prost_duration = Duration { seconds, nanos };
match ::chrono::TimeDelta::try_from(prost_duration) {
Err(DurationError::OutOfRange) => {
// Test case not valid: duration out of range
return;
}
Err(err) => {
panic!("Unexpected error: {err}")
}
Ok(chrono_duration) => {
let mut normalized_prost_duration = prost_duration;
normalized_prost_duration.normalize();
assert_eq!(
Duration::try_from(chrono_duration).unwrap(),
normalized_prost_duration
);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "std")]
#[test]
fn test_duration_from_str() {
assert_eq!(
Duration::from_str("0s"),
Ok(Duration {
seconds: 0,
nanos: 0
})
);
assert_eq!(
Duration::from_str("123s"),
Ok(Duration {
seconds: 123,
nanos: 0
})
);
assert_eq!(
Duration::from_str("0.123s"),
Ok(Duration {
seconds: 0,
nanos: 123_000_000
})
);
assert_eq!(
Duration::from_str("-123s"),
Ok(Duration {
seconds: -123,
nanos: 0
})
);
assert_eq!(
Duration::from_str("-0.123s"),
Ok(Duration {
seconds: 0,
nanos: -123_000_000
})
);
assert_eq!(
Duration::from_str("22041211.6666666666666s"),
Ok(Duration {
seconds: 22041211,
nanos: 666_666_666
})
);
}
#[cfg(feature = "std")]
#[test]
fn test_format_duration() {
assert_eq!(
"0s",
Duration {
seconds: 0,
nanos: 0
}
.to_string()
);
assert_eq!(
"123s",
Duration {
seconds: 123,
nanos: 0
}
.to_string()
);
assert_eq!(
"0.123s",
Duration {
seconds: 0,
nanos: 123_000_000
}
.to_string()
);
assert_eq!(
"-123s",
Duration {
seconds: -123,
nanos: 0
}
.to_string()
);
assert_eq!(
"-0.123s",
Duration {
seconds: 0,
nanos: -123_000_000
}
.to_string()
);
}
#[cfg(feature = "std")]
#[test]
fn check_duration_try_from_negative_nanos() {
let seconds: u64 = 0;
let nanos: u32 = 1;
let std_duration = std::time::Duration::new(seconds, nanos);
let neg_prost_duration = Duration {
seconds: 0,
nanos: -1,
};
assert!(matches!(
time::Duration::try_from(neg_prost_duration),
Err(DurationError::NegativeDuration(d)) if d == std_duration,
))
}
#[test]
fn check_duration_normalize() {
#[rustfmt::skip] // Don't mangle the table formatting.
let cases = [
// --- Table of test cases ---
// test seconds test nanos expected seconds expected nanos
(line!(), 0, 0, 0, 0),
(line!(), 1, 1, 1, 1),
(line!(), -1, -1, -1, -1),
(line!(), 0, 999_999_999, 0, 999_999_999),
(line!(), 0, -999_999_999, 0, -999_999_999),
(line!(), 0, 1_000_000_000, 1, 0),
(line!(), 0, -1_000_000_000, -1, 0),
(line!(), 0, 1_000_000_001, 1, 1),
(line!(), 0, -1_000_000_001, -1, -1),
(line!(), -1, 1, 0, -999_999_999),
(line!(), 1, -1, 0, 999_999_999),
(line!(), -1, 1_000_000_000, 0, 0),
(line!(), 1, -1_000_000_000, 0, 0),
(line!(), i64::MIN , 0, i64::MIN , 0),
(line!(), i64::MIN + 1, 0, i64::MIN + 1, 0),
(line!(), i64::MIN , 1, i64::MIN + 1, -999_999_999),
(line!(), i64::MIN , 1_000_000_000, i64::MIN + 1, 0),
(line!(), i64::MIN , -1_000_000_000, i64::MIN , -999_999_999),
(line!(), i64::MIN + 1, -1_000_000_000, i64::MIN , 0),
(line!(), i64::MIN + 2, -1_000_000_000, i64::MIN + 1, 0),
(line!(), i64::MIN , -1_999_999_998, i64::MIN , -999_999_999),
(line!(), i64::MIN + 1, -1_999_999_998, i64::MIN , -999_999_998),
(line!(), i64::MIN + 2, -1_999_999_998, i64::MIN + 1, -999_999_998),
(line!(), i64::MIN , -1_999_999_999, i64::MIN , -999_999_999),
(line!(), i64::MIN + 1, -1_999_999_999, i64::MIN , -999_999_999),
(line!(), i64::MIN + 2, -1_999_999_999, i64::MIN + 1, -999_999_999),
(line!(), i64::MIN , -2_000_000_000, i64::MIN , -999_999_999),
(line!(), i64::MIN + 1, -2_000_000_000, i64::MIN , -999_999_999),
(line!(), i64::MIN + 2, -2_000_000_000, i64::MIN , 0),
(line!(), i64::MIN , -999_999_998, i64::MIN , -999_999_998),
(line!(), i64::MIN + 1, -999_999_998, i64::MIN + 1, -999_999_998),
(line!(), i64::MAX , 0, i64::MAX , 0),
(line!(), i64::MAX - 1, 0, i64::MAX - 1, 0),
(line!(), i64::MAX , -1, i64::MAX - 1, 999_999_999),
(line!(), i64::MAX , 1_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_000_000_000, i64::MAX , 0),
(line!(), i64::MAX - 2, 1_000_000_000, i64::MAX - 1, 0),
(line!(), i64::MAX , 1_999_999_998, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_999_999_998, i64::MAX , 999_999_998),
(line!(), i64::MAX - 2, 1_999_999_998, i64::MAX - 1, 999_999_998),
(line!(), i64::MAX , 1_999_999_999, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_999_999_999, i64::MAX , 999_999_999),
(line!(), i64::MAX - 2, 1_999_999_999, i64::MAX - 1, 999_999_999),
(line!(), i64::MAX , 2_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 2_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 2, 2_000_000_000, i64::MAX , 0),
(line!(), i64::MAX , 999_999_998, i64::MAX , 999_999_998),
(line!(), i64::MAX - 1, 999_999_998, i64::MAX - 1, 999_999_998),
];
for case in cases.iter() {
let test_duration = Duration {
seconds: case.1,
nanos: case.2,
};
assert_eq!(
test_duration.normalized(),
Duration {
seconds: case.3,
nanos: case.4,
},
"test case on line {} doesn't match",
case.0,
);
}
}
}

View File

@@ -0,0 +1,84 @@
#![doc(html_root_url = "https://docs.rs/prost-types/0.14.1")]
//! Protocol Buffers well-known types.
//!
//! Note that the documentation for the types defined in this crate are generated from the Protobuf
//! definitions, so code examples are not in Rust.
//!
//! See the [Protobuf reference][1] for more information about well-known types.
//!
//! ## Any
//!
//! The well-known [`Any`] type contains an arbitrary serialized message along with a URL that
//! describes the type of the serialized message. Every message that also implements [`Name`]
//! can be serialized to and deserialized from [`Any`].
//!
//! ### Serialization
//!
//! A message can be serialized using [`Any::from_msg`].
//!
//! ```rust
//! let message = Timestamp::date(2000, 1, 1).unwrap();
//! let any = Any::from_msg(&message).unwrap();
//! ```
//!
//! ### Deserialization
//!
//! A message can be deserialized using [`Any::to_msg`].
//!
//! ```rust
//! # let message = Timestamp::date(2000, 1, 1).unwrap();
//! # let any = Any::from_msg(&message).unwrap();
//! #
//! let message = any.to_msg::<Timestamp>().unwrap();
//! ```
//!
//! ## Feature Flags
//! - `std`: Enable integration with standard library. Disable this feature for `no_std` support. This feature is enabled by default.
//! - `arbitrary`: Enable integration with crate `arbitrary`. All types on this crate will implement `trait Arbitrary`.
//! - `chrono`: Enable integration with crate `chrono`. Time related types implement conversions to/from their `chrono` equivalent.
//!
//! [1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf
#![cfg_attr(not(feature = "std"), no_std)]
#[rustfmt::skip]
pub mod compiler;
mod datetime;
#[rustfmt::skip]
mod protobuf;
use core::convert::TryFrom;
use core::fmt;
use core::str::FromStr;
use core::time;
use prost::alloc::format;
use prost::alloc::string::String;
use prost::alloc::vec::Vec;
use prost::{DecodeError, EncodeError, Message};
pub use protobuf::*;
// The Protobuf `Duration` and `Timestamp` types can't delegate to the standard library equivalents
// because the Protobuf versions are signed. To make them easier to work with, `From` conversions
// are defined in both directions.
const NANOS_PER_SECOND: i32 = 1_000_000_000;
const NANOS_MAX: i32 = NANOS_PER_SECOND - 1;
const PACKAGE: &str = "google.protobuf";
// mod any;
mod duration;
pub use duration::DurationError;
mod timestamp;
pub use timestamp::TimestampError;
mod type_url;
// pub(crate) use type_url::{type_url_for, TypeUrl};
pub(crate) use type_url::TypeUrl;
mod conversions;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,431 @@
use super::*;
impl Timestamp {
    /// Normalizes the timestamp to a canonical format.
    ///
    /// After normalization, `nanos` lies in `[0, 999_999_999]`; timestamps
    /// whose carry would overflow `i64` are clamped to the earliest/latest
    /// normal value.
    ///
    /// Based on [`google::protobuf::util::CreateNormalized`][1].
    ///
    /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L59-L77
    pub fn normalize(&mut self) {
        // Make sure nanos is in the range.
        if self.nanos <= -NANOS_PER_SECOND || self.nanos >= NANOS_PER_SECOND {
            // Carry whole seconds out of `nanos` into `seconds`, checking for
            // i64 overflow of the carry.
            if let Some(seconds) = self
                .seconds
                .checked_add((self.nanos / NANOS_PER_SECOND) as i64)
            {
                self.seconds = seconds;
                self.nanos %= NANOS_PER_SECOND;
            } else if self.nanos < 0 {
                // Negative overflow! Set to the earliest normal value.
                self.seconds = i64::MIN;
                self.nanos = 0;
            } else {
                // Positive overflow! Set to the latest normal value.
                self.seconds = i64::MAX;
                self.nanos = 999_999_999;
            }
        }
        // For Timestamp nanos should be in the range [0, 999999999].
        if self.nanos < 0 {
            // Borrow one second to make `nanos` non-negative.
            if let Some(seconds) = self.seconds.checked_sub(1) {
                self.seconds = seconds;
                self.nanos += NANOS_PER_SECOND;
            } else {
                // Negative overflow! Set to the earliest normal value.
                debug_assert_eq!(self.seconds, i64::MIN);
                self.nanos = 0;
            }
        }
        // TODO: should this be checked?
        // debug_assert!(self.seconds >= -62_135_596_800 && self.seconds <= 253_402_300_799,
        // "invalid timestamp: {:?}", self);
    }
    /// Normalizes the timestamp to a canonical format, returning the original value if it cannot be
    /// normalized.
    ///
    /// Normalization is based on [`google::protobuf::util::CreateNormalized`][1].
    ///
    /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L59-L77
    pub fn try_normalize(mut self) -> Result<Timestamp, Timestamp> {
        let before = self;
        self.normalize();
        // If the seconds value has changed, and is either i64::MIN or i64::MAX, then the timestamp
        // normalization overflowed.
        // NOTE(review): this heuristic does not detect the case where
        // `seconds` is already i64::MIN and only `nanos` was clamped to 0 —
        // such inputs return `Ok` despite losing their nanos; confirm whether
        // that is intended.
        if (self.seconds == i64::MAX || self.seconds == i64::MIN) && self.seconds != before.seconds
        {
            Err(before)
        } else {
            Ok(self)
        }
    }
    /// Return a normalized copy of the timestamp to a canonical format.
    ///
    /// Based on [`google::protobuf::util::CreateNormalized`][1].
    ///
    /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L59-L77
    pub fn normalized(&self) -> Self {
        let mut result = *self;
        result.normalize();
        result
    }
    /// Creates a new `Timestamp` at the start of the provided UTC date.
    pub fn date(year: i64, month: u8, day: u8) -> Result<Timestamp, TimestampError> {
        Timestamp::date_time_nanos(year, month, day, 0, 0, 0, 0)
    }
    /// Creates a new `Timestamp` instance with the provided UTC date and time.
    ///
    /// Delegates to [`Timestamp::date_time_nanos`] with zero nanoseconds.
    pub fn date_time(
        year: i64,
        month: u8,
        day: u8,
        hour: u8,
        minute: u8,
        second: u8,
    ) -> Result<Timestamp, TimestampError> {
        Timestamp::date_time_nanos(year, month, day, hour, minute, second, 0)
    }
    /// Creates a new `Timestamp` instance with the provided UTC date and time.
    ///
    /// Returns [`TimestampError::InvalidDateTime`] (via the `TryFrom`
    /// conversion) when the components do not form a valid date/time.
    pub fn date_time_nanos(
        year: i64,
        month: u8,
        day: u8,
        hour: u8,
        minute: u8,
        second: u8,
        nanos: u32,
    ) -> Result<Timestamp, TimestampError> {
        let date_time = datetime::DateTime {
            year,
            month,
            day,
            hour,
            minute,
            second,
            nanos,
        };
        Timestamp::try_from(date_time)
    }
}
// impl Name for Timestamp {
// const PACKAGE: &'static str = PACKAGE;
// const NAME: &'static str = "Timestamp";
// fn type_url() -> String {
// type_url_for::<Self>()
// }
// }
#[cfg(feature = "std")]
impl From<std::time::SystemTime> for Timestamp {
    /// Converts a `SystemTime` into a `Timestamp`, handling instants both
    /// after and before the Unix epoch.
    fn from(system_time: std::time::SystemTime) -> Timestamp {
        let (seconds, nanos) = match system_time.duration_since(std::time::UNIX_EPOCH) {
            Ok(duration) => {
                // At or after the epoch: the duration maps directly.
                let seconds = i64::try_from(duration.as_secs()).unwrap();
                (seconds, duration.subsec_nanos() as i32)
            }
            Err(error) => {
                // Before the epoch: the error carries the positive distance
                // back to the epoch.
                let duration = error.duration();
                let seconds = i64::try_from(duration.as_secs()).unwrap();
                let nanos = duration.subsec_nanos() as i32;
                if nanos == 0 {
                    (-seconds, 0)
                } else {
                    // Borrow one second so `nanos` stays in [0, 999_999_999],
                    // matching the Timestamp convention that nanos always
                    // count forward in time.
                    (-seconds - 1, 1_000_000_000 - nanos)
                }
            }
        };
        Timestamp { seconds, nanos }
    }
}
/// A timestamp handling error.
#[derive(Debug, PartialEq)]
#[non_exhaustive]
pub enum TimestampError {
/// Indicates that a [`Timestamp`] could not be converted to
/// [`SystemTime`][std::time::SystemTime] because it is out of range.
///
/// The range of times that can be represented by `SystemTime` depends on the platform. All
/// `Timestamp`s are likely representable on 64-bit Unix-like platforms, but other platforms,
/// such as Windows and 32-bit Linux, may not be able to represent the full range of
/// `Timestamp`s.
OutOfSystemRange(Timestamp),
/// An error indicating failure to parse a timestamp in RFC-3339 format.
ParseFailure,
/// Indicates an error when constructing a timestamp due to invalid date or time data.
InvalidDateTime,
}
impl fmt::Display for TimestampError {
    /// Renders a short, human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            TimestampError::OutOfSystemRange(ref timestamp) => write!(
                f,
                "{timestamp} is not representable as a `SystemTime` because it is out of range",
            ),
            TimestampError::ParseFailure => {
                f.write_str("failed to parse RFC-3339 formatted timestamp")
            }
            TimestampError::InvalidDateTime => f.write_str("invalid date or time"),
        }
    }
}
#[cfg(feature = "std")]
impl std::error::Error for TimestampError {}
#[cfg(feature = "std")]
impl TryFrom<Timestamp> for std::time::SystemTime {
    type Error = TimestampError;
    /// Converts a `Timestamp` to a `SystemTime`, failing with
    /// [`TimestampError::OutOfSystemRange`] when the platform cannot
    /// represent the instant.
    fn try_from(mut timestamp: Timestamp) -> Result<std::time::SystemTime, Self::Error> {
        // Keep the caller's original (un-normalized) value for error reporting.
        let orig_timestamp = timestamp;
        timestamp.normalize();
        // Step away from the epoch by whole seconds first...
        let system_time = if timestamp.seconds >= 0 {
            std::time::UNIX_EPOCH.checked_add(time::Duration::from_secs(timestamp.seconds as u64))
        } else {
            std::time::UNIX_EPOCH.checked_sub(time::Duration::from_secs(
                timestamp
                    .seconds
                    .checked_neg()
                    // i64::MIN has no positive counterpart, so negation fails.
                    .ok_or(TimestampError::OutOfSystemRange(timestamp))? as u64,
            ))
        };
        // ...then add the nanosecond part, which is non-negative after
        // normalization.
        let system_time = system_time.and_then(|system_time| {
            system_time.checked_add(time::Duration::from_nanos(timestamp.nanos as u64))
        });
        system_time.ok_or(TimestampError::OutOfSystemRange(orig_timestamp))
    }
}
impl FromStr for Timestamp {
    type Err = TimestampError;
    /// Parses an RFC-3339 formatted timestamp string.
    fn from_str(s: &str) -> Result<Timestamp, TimestampError> {
        match datetime::parse_timestamp(s) {
            Some(timestamp) => Ok(timestamp),
            None => Err(TimestampError::ParseFailure),
        }
    }
}
impl fmt::Display for Timestamp {
    /// Formats the timestamp by delegating to the internal `DateTime`
    /// representation's `Display` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let date_time = datetime::DateTime::from(*self);
        fmt::Display::fmt(&date_time, f)
    }
}
#[cfg(kani)]
mod proofs {
use super::*;
#[cfg(feature = "std")]
#[kani::proof]
#[kani::unwind(3)]
fn check_timestamp_roundtrip_via_system_time() {
let seconds = kani::any();
let nanos = kani::any();
let mut timestamp = Timestamp { seconds, nanos };
timestamp.normalize();
if let Ok(system_time) = std::time::SystemTime::try_from(timestamp) {
assert_eq!(Timestamp::from(system_time), timestamp);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "std")]
use proptest::prelude::*;
#[cfg(feature = "std")]
use std::time::{self, SystemTime, UNIX_EPOCH};
#[cfg(feature = "std")]
proptest! {
#[test]
fn check_system_time_roundtrip(
system_time in SystemTime::arbitrary(),
) {
prop_assert_eq!(SystemTime::try_from(Timestamp::from(system_time)).unwrap(), system_time);
}
}
#[cfg(feature = "std")]
#[test]
fn check_timestamp_negative_seconds() {
// Representative tests for the case of timestamps before the UTC Epoch time:
// validate the expected behaviour that "negative second values with fractions
// must still have non-negative nanos values that count forward in time"
// https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp
//
// To ensure cross-platform compatibility, all nanosecond values in these
// tests are in minimum 100 ns increments. This does not affect the general
// character of the behaviour being tested, but ensures that the tests are
// valid for both POSIX (1 ns precision) and Windows (100 ns precision).
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(1_001, 0)),
Timestamp {
seconds: -1_001,
nanos: 0
}
);
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(0, 999_999_900)),
Timestamp {
seconds: -1,
nanos: 100
}
);
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(2_001_234, 12_300)),
Timestamp {
seconds: -2_001_235,
nanos: 999_987_700
}
);
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(768, 65_432_100)),
Timestamp {
seconds: -769,
nanos: 934_567_900
}
);
}
#[cfg(all(unix, feature = "std"))]
#[test]
fn check_timestamp_negative_seconds_1ns() {
// UNIX-only test cases with 1 ns precision
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(0, 999_999_999)),
Timestamp {
seconds: -1,
nanos: 1
}
);
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(1_234_567, 123)),
Timestamp {
seconds: -1_234_568,
nanos: 999_999_877
}
);
assert_eq!(
Timestamp::from(UNIX_EPOCH - time::Duration::new(890, 987_654_321)),
Timestamp {
seconds: -891,
nanos: 12_345_679
}
);
}
#[cfg(feature = "std")]
#[test]
fn check_timestamp_normalize() {
// Make sure that `Timestamp::normalize` behaves correctly on and near overflow.
#[rustfmt::skip] // Don't mangle the table formatting.
let cases = [
// --- Table of test cases ---
// test seconds test nanos expected seconds expected nanos
(line!(), 0, 0, 0, 0),
(line!(), 1, 1, 1, 1),
(line!(), -1, -1, -2, 999_999_999),
(line!(), 0, 999_999_999, 0, 999_999_999),
(line!(), 0, -999_999_999, -1, 1),
(line!(), 0, 1_000_000_000, 1, 0),
(line!(), 0, -1_000_000_000, -1, 0),
(line!(), 0, 1_000_000_001, 1, 1),
(line!(), 0, -1_000_000_001, -2, 999_999_999),
(line!(), -1, 1, -1, 1),
(line!(), 1, -1, 0, 999_999_999),
(line!(), -1, 1_000_000_000, 0, 0),
(line!(), 1, -1_000_000_000, 0, 0),
(line!(), i64::MIN , 0, i64::MIN , 0),
(line!(), i64::MIN + 1, 0, i64::MIN + 1, 0),
(line!(), i64::MIN , 1, i64::MIN , 1),
(line!(), i64::MIN , 1_000_000_000, i64::MIN + 1, 0),
(line!(), i64::MIN , -1_000_000_000, i64::MIN , 0),
(line!(), i64::MIN + 1, -1_000_000_000, i64::MIN , 0),
(line!(), i64::MIN + 2, -1_000_000_000, i64::MIN + 1, 0),
(line!(), i64::MIN , -1_999_999_998, i64::MIN , 0),
(line!(), i64::MIN + 1, -1_999_999_998, i64::MIN , 0),
(line!(), i64::MIN + 2, -1_999_999_998, i64::MIN , 2),
(line!(), i64::MIN , -1_999_999_999, i64::MIN , 0),
(line!(), i64::MIN + 1, -1_999_999_999, i64::MIN , 0),
(line!(), i64::MIN + 2, -1_999_999_999, i64::MIN , 1),
(line!(), i64::MIN , -2_000_000_000, i64::MIN , 0),
(line!(), i64::MIN + 1, -2_000_000_000, i64::MIN , 0),
(line!(), i64::MIN + 2, -2_000_000_000, i64::MIN , 0),
(line!(), i64::MIN , -999_999_998, i64::MIN , 0),
(line!(), i64::MIN + 1, -999_999_998, i64::MIN , 2),
(line!(), i64::MAX , 0, i64::MAX , 0),
(line!(), i64::MAX - 1, 0, i64::MAX - 1, 0),
(line!(), i64::MAX , -1, i64::MAX - 1, 999_999_999),
(line!(), i64::MAX , 1_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_000_000_000, i64::MAX , 0),
(line!(), i64::MAX - 2, 1_000_000_000, i64::MAX - 1, 0),
(line!(), i64::MAX , 1_999_999_998, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_999_999_998, i64::MAX , 999_999_998),
(line!(), i64::MAX - 2, 1_999_999_998, i64::MAX - 1, 999_999_998),
(line!(), i64::MAX , 1_999_999_999, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 1_999_999_999, i64::MAX , 999_999_999),
(line!(), i64::MAX - 2, 1_999_999_999, i64::MAX - 1, 999_999_999),
(line!(), i64::MAX , 2_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 1, 2_000_000_000, i64::MAX , 999_999_999),
(line!(), i64::MAX - 2, 2_000_000_000, i64::MAX , 0),
(line!(), i64::MAX , 999_999_998, i64::MAX , 999_999_998),
(line!(), i64::MAX - 1, 999_999_998, i64::MAX - 1, 999_999_998),
];
for case in cases.iter() {
let test_timestamp = crate::Timestamp {
seconds: case.1,
nanos: case.2,
};
assert_eq!(
test_timestamp.normalized(),
crate::Timestamp {
seconds: case.3,
nanos: case.4,
},
"test case on line {} doesn't match",
case.0,
);
}
}
#[cfg(feature = "arbitrary")]
#[test]
fn check_timestamp_implements_arbitrary() {
use arbitrary::{Arbitrary, Unstructured};
let mut unstructured = Unstructured::new(&[]);
assert_eq!(
Timestamp::arbitrary(&mut unstructured),
Ok(Timestamp {
seconds: 0,
nanos: 0
})
);
}
}

View File

@@ -0,0 +1,70 @@
use super::*;
/// URL/resource name that uniquely identifies the type of the serialized protocol buffer message,
/// e.g. `type.googleapis.com/google.protobuf.Duration`.
///
/// This string must contain at least one "/" character.
///
/// The last segment of the URL's path must represent the fully qualified name of the type (as in
/// `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading "." is
/// not accepted).
///
/// If no scheme is provided, `https` is assumed.
///
/// Schemes other than `http`, `https` (or the empty scheme) might be used with implementation
/// specific semantics.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct TypeUrl<'a> {
/// Fully qualified name of the type, e.g. `google.protobuf.Duration`
pub(crate) full_name: &'a str,
}
impl<'a> TypeUrl<'a> {
    /// Parses a type URL, returning `None` when it contains no `/` or when
    /// the trailing type name is not in canonical form.
    pub(crate) fn new(s: &'a str) -> core::option::Option<Self> {
        // A type URL must contain at least one "/"; everything after the
        // last one is the fully qualified type name
        // (as in `path/google.protobuf.Duration`).
        let (_prefix, full_name) = s.rsplit_once('/')?;
        // The name should be in a canonical form (e.g., leading "." is not accepted).
        (!full_name.starts_with('.')).then(|| Self { full_name })
    }
}
// /// Compute the type URL for the given `google.protobuf` type, using `type.googleapis.com` as the
// /// authority for the URL.
// pub(crate) fn type_url_for<T: Name>() -> String {
// format!("type.googleapis.com/{}.{}", T::PACKAGE, T::NAME)
// }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn check_type_url_parsing() {
let example_type_name = "google.protobuf.Duration";
let url = TypeUrl::new("type.googleapis.com/google.protobuf.Duration").unwrap();
assert_eq!(url.full_name, example_type_name);
let full_url =
TypeUrl::new("https://type.googleapis.com/google.protobuf.Duration").unwrap();
assert_eq!(full_url.full_name, example_type_name);
let relative_url = TypeUrl::new("/google.protobuf.Duration").unwrap();
assert_eq!(relative_url.full_name, example_type_name);
// The name should be in a canonical form (e.g., leading "." is not accepted).
assert_eq!(TypeUrl::new("/.google.protobuf.Duration"), None);
// Must contain at least one "/" character.
assert_eq!(TypeUrl::new("google.protobuf.Duration"), None);
}
}

View File

@@ -1,3 +1,20 @@
## v0.12.24
- Refactor cookie handling to an internal middleware.
- Refactor internal random generator.
- Refactor base64 encoding to reduce a copy.
- Documentation updates.
## v0.12.23
- Add `ClientBuilder::unix_socket(path)` option that will force all requests over that Unix Domain Socket.
- Add `ClientBuilder::retry(policy)` and `reqwest::retry::Builder` to configure automatic retries.
- Add `ClientBuilder::dns_resolver2()` with more ergonomic argument bounds, allowing more resolver implementations.
- Add `http3_*` options to `blocking::ClientBuilder`.
- Fix default TCP timeout values so they are enabled and faster.
- Fix SOCKS proxies to default to port 1080.
- (wasm) Add cache methods to `RequestBuilder`.
## v0.12.22
- Fix socks proxies when resolving IPv6 destinations.

View File

@@ -1,6 +1,6 @@
[package]
name = "reqwest"
version = "0.12.22"
version = "0.12.24"
description = "higher level HTTP client library"
keywords = ["http", "request", "client"]
categories = ["web-programming::http-client", "wasm"]
@@ -113,7 +113,6 @@ url = "2.4"
bytes = "1.2"
serde = "1.0"
serde_urlencoded = "0.7.1"
tower-service = "0.3"
futures-core = { version = "0.3.28", default-features = false }
futures-util = { version = "0.3.28", default-features = false, optional = true }
sync_wrapper = { version = "1.0", features = ["futures"] }
@@ -135,7 +134,8 @@ h2 = { version = "0.4", optional = true }
log = "0.4.17"
percent-encoding = "2.3"
tokio = { version = "1.0", default-features = false, features = ["net", "time"] }
tower = { version = "0.5.2", default-features = false, features = ["timeout", "util"] }
tower = { version = "0.5.2", default-features = false, features = ["retry", "timeout", "util"] }
tower-service = "0.3"
tower-http = { version = "0.6.5", default-features = false, features = ["follow-redirect"] }
pin-project-lite = "0.2.11"
@@ -211,7 +211,8 @@ features = [
"ServiceWorkerGlobalScope",
"RequestCredentials",
"File",
"ReadableStream"
"ReadableStream",
"RequestCache"
]
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]

View File

@@ -3,7 +3,7 @@
[![crates.io](https://img.shields.io/crates/v/reqwest.svg)](https://crates.io/crates/reqwest)
[![Documentation](https://docs.rs/reqwest/badge.svg)](https://docs.rs/reqwest)
[![MIT/Apache-2 licensed](https://img.shields.io/crates/l/reqwest.svg)](./LICENSE-APACHE)
[![CI](https://github.com/seanmonstar/reqwest/workflows/CI/badge.svg)](https://github.com/seanmonstar/reqwest/actions?query=workflow%3ACI)
[![CI](https://github.com/seanmonstar/reqwest/actions/workflows/ci.yml/badge.svg)](https://github.com/seanmonstar/reqwest/actions/workflows/ci.yml)
An ergonomic, batteries-included HTTP Client for Rust.

View File

@@ -1,7 +1,7 @@
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::task::{Context, Poll, ready};
use std::time::Duration;
use bytes::Bytes;
@@ -107,13 +107,9 @@ impl Body {
use http_body_util::StreamBody;
let body = http_body_util::BodyExt::boxed(StreamBody::new(sync_wrapper::SyncStream::new(
stream
.map_ok(|d| Frame::data(Bytes::from(d)))
.map_err(Into::into),
stream.map_ok(|d| Frame::data(Bytes::from(d))).map_err(Into::into),
)));
Body {
inner: Inner::Streaming(body),
}
Body { inner: Inner::Streaming(body) }
}
pub(crate) fn empty() -> Body {
@@ -121,9 +117,7 @@ impl Body {
}
pub(crate) fn reusable(chunk: Bytes) -> Body {
Body {
inner: Inner::Reusable(chunk),
}
Body { inner: Inner::Reusable(chunk) }
}
/// Wrap a [`HttpBody`] in a box inside `Body`.
@@ -149,18 +143,7 @@ impl Body {
let boxed = IntoBytesBody { inner }.map_err(Into::into).boxed();
Body {
inner: Inner::Streaming(boxed),
}
}
pub(crate) fn try_reuse(self) -> (Option<Bytes>, Self) {
let reuse = match self.inner {
Inner::Reusable(ref chunk) => Some(chunk.clone()),
Inner::Streaming { .. } => None,
};
(reuse, self)
Body { inner: Inner::Streaming(boxed) }
}
pub(crate) fn try_clone(&self) -> Option<Body> {
@@ -296,18 +279,11 @@ impl HttpBody for Body {
// ===== impl TotalTimeoutBody =====
pub(crate) fn total_timeout<B>(body: B, timeout: Pin<Box<Sleep>>) -> TotalTimeoutBody<B> {
TotalTimeoutBody {
inner: body,
timeout,
}
TotalTimeoutBody { inner: body, timeout }
}
pub(crate) fn with_read_timeout<B>(body: B, timeout: Duration) -> ReadTimeoutBody<B> {
ReadTimeoutBody {
inner: body,
sleep: None,
timeout,
}
ReadTimeoutBody { inner: body, sleep: None, timeout }
}
impl<B> hyper::body::Body for TotalTimeoutBody<B>

File diff suppressed because it is too large Load Diff

View File

@@ -4,52 +4,31 @@ pub(crate) mod connect;
pub(crate) mod dns;
mod pool;
use crate::async_impl::body::ResponseBody;
use crate::async_impl::h3_client::pool::{Key, Pool, PoolClient};
#[cfg(feature = "cookies")]
use crate::cookie;
use crate::error::{BoxError, Error, Kind};
use crate::{error, Body};
use std::future::{self, Future};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use connect::H3Connector;
use http::{Request, Response};
use log::trace;
use std::future::{self, Future};
use std::pin::Pin;
#[cfg(feature = "cookies")]
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use sync_wrapper::SyncWrapper;
use tower::Service;
use crate::async_impl::body::ResponseBody;
use crate::async_impl::h3_client::pool::{Key, Pool, PoolClient};
use crate::error::{BoxError, Error, Kind};
use crate::{Body, error};
#[derive(Clone)]
pub(crate) struct H3Client {
pool: Pool,
connector: H3Connector,
#[cfg(feature = "cookies")]
cookie_store: Option<Arc<dyn cookie::CookieStore>>,
}
impl H3Client {
#[cfg(not(feature = "cookies"))]
pub fn new(connector: H3Connector, pool_timeout: Option<Duration>) -> Self {
H3Client {
pool: Pool::new(pool_timeout),
connector,
}
}
#[cfg(feature = "cookies")]
pub fn new(
connector: H3Connector,
pool_timeout: Option<Duration>,
cookie_store: Option<Arc<dyn cookie::CookieStore>>,
) -> Self {
H3Client {
pool: Pool::new(pool_timeout),
connector,
cookie_store,
}
H3Client { pool: Pool::new(pool_timeout), connector }
}
async fn get_pooled_client(&mut self, key: Key) -> Result<PoolClient, BoxError> {
@@ -79,7 +58,6 @@ impl H3Client {
Ok(self.pool.new_connection(lock, driver, tx))
}
#[cfg(not(feature = "cookies"))]
async fn send_request(
mut self,
key: Key,
@@ -89,46 +67,7 @@ impl H3Client {
Ok(client) => client,
Err(e) => return Err(error::request(e)),
};
pooled
.send_request(req)
.await
.map_err(|e| Error::new(Kind::Request, Some(e)))
}
#[cfg(feature = "cookies")]
async fn send_request(
mut self,
key: Key,
mut req: Request<Body>,
) -> Result<Response<ResponseBody>, Error> {
let mut pooled = match self.get_pooled_client(key).await {
Ok(client) => client,
Err(e) => return Err(error::request(e)),
};
let url = url::Url::parse(req.uri().to_string().as_str()).unwrap();
if let Some(cookie_store) = self.cookie_store.as_ref() {
if req.headers().get(crate::header::COOKIE).is_none() {
let headers = req.headers_mut();
crate::util::add_cookie_header(headers, &**cookie_store, &url);
}
}
let res = pooled
.send_request(req)
.await
.map_err(|e| Error::new(Kind::Request, Some(e)));
if let Some(ref cookie_store) = self.cookie_store {
if let Ok(res) = &res {
let mut cookies = cookie::extract_response_cookie_headers(res.headers()).peekable();
if cookies.peek().is_some() {
cookie_store.set_cookies(&mut cookies, &url);
}
}
}
res
pooled.send_request(req).await.map_err(|e| Error::new(Kind::Request, Some(e)))
}
pub fn request(&self, mut req: Request<Body>) -> H3ResponseFuture {
@@ -137,7 +76,7 @@ impl H3Client {
Err(e) => {
return H3ResponseFuture {
inner: SyncWrapper::new(Box::pin(future::ready(Err(e)))),
}
};
}
};
H3ResponseFuture {
@@ -155,9 +94,7 @@ impl Service<Request<Body>> for H3Client {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request<Body>) -> Self::Future {
self.request(req)
}
fn call(&mut self, req: Request<Body>) -> Self::Future { self.request(req) }
}
pub(crate) struct H3ResponseFuture {

View File

@@ -2,7 +2,6 @@
use std::borrow::Cow;
use std::fmt;
use std::pin::Pin;
#[cfg(feature = "stream")]
use std::io;
#[cfg(feature = "stream")]
@@ -13,9 +12,8 @@ use mime_guess::Mime;
use percent_encoding::{self, AsciiSet, NON_ALPHANUMERIC};
#[cfg(feature = "stream")]
use tokio::fs::File;
use futures_core::Stream;
use futures_util::{future, stream, StreamExt};
use futures_util::{StreamExt, future, stream};
use super::Body;
use crate::header::HeaderMap;
@@ -53,24 +51,16 @@ pub(crate) trait PartProps {
// ===== impl Form =====
impl Default for Form {
fn default() -> Self {
Self::new()
}
fn default() -> Self { Self::new() }
}
impl Form {
/// Creates a new async Form without any content.
pub fn new() -> Form {
Form {
inner: FormParts::new(),
}
}
pub fn new() -> Form { Form { inner: FormParts::new() } }
/// Get the boundary that this form will use.
#[inline]
pub fn boundary(&self) -> &str {
self.inner.boundary()
}
pub fn boundary(&self) -> &str { self.inner.boundary() }
/// Add a data field with supplied name and value.
///
@@ -118,9 +108,7 @@ impl Form {
/// Adds a customized Part.
pub fn part<T>(self, name: T, part: Part) -> Form
where
T: Into<Cow<'static, str>>,
{
where T: Into<Cow<'static, str>> {
self.with_inner(move |inner| inner.part(name, part))
}
@@ -170,9 +158,7 @@ impl Form {
as Pin<Box<dyn Stream<Item = crate::Result<Bytes>> + Send + Sync>>
});
// append special ending boundary
let last = stream::once(future::ready(Ok(
format!("--{}--\r\n", self.boundary()).into()
)));
let last = stream::once(future::ready(Ok(format!("--{}--\r\n", self.boundary()).into())));
Box::pin(stream.chain(last))
}
@@ -186,15 +172,10 @@ impl Form {
T: Into<Cow<'static, str>>,
{
// start with boundary
let boundary = stream::once(future::ready(Ok(
format!("--{}\r\n", self.boundary()).into()
)));
let boundary = stream::once(future::ready(Ok(format!("--{}\r\n", self.boundary()).into())));
// append headers
let header = stream::once(future::ready(Ok({
let mut h = self
.inner
.percent_encoding
.encode_headers(&name.into(), &part.meta);
let mut h = self.inner.percent_encoding.encode_headers(&name.into(), &part.meta);
h.extend_from_slice(b"\r\n\r\n");
h.into()
})));
@@ -205,24 +186,16 @@ impl Form {
.chain(stream::once(future::ready(Ok("\r\n".into()))))
}
pub(crate) fn compute_length(&mut self) -> Option<u64> {
self.inner.compute_length()
}
pub(crate) fn compute_length(&mut self) -> Option<u64> { self.inner.compute_length() }
fn with_inner<F>(self, func: F) -> Self
where
F: FnOnce(FormParts<Part>) -> FormParts<Part>,
{
Form {
inner: func(self.inner),
}
where F: FnOnce(FormParts<Part>) -> FormParts<Part> {
Form { inner: func(self.inner) }
}
}
impl fmt::Debug for Form {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt_fields("Form", f)
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.fmt_fields("Form", f) }
}
// ===== impl Part =====
@@ -230,9 +203,7 @@ impl fmt::Debug for Form {
impl Part {
/// Makes a text parameter.
pub fn text<T>(value: T) -> Part
where
T: Into<Cow<'static, str>>,
{
where T: Into<Cow<'static, str>> {
let body = match value.into() {
Cow::Borrowed(slice) => Body::from(slice),
Cow::Owned(string) => Body::from(string),
@@ -242,9 +213,7 @@ impl Part {
/// Makes a new parameter from arbitrary bytes.
pub fn bytes<T>(value: T) -> Part
where
T: Into<Cow<'static, [u8]>>,
{
where T: Into<Cow<'static, [u8]>> {
let body = match value.into() {
Cow::Borrowed(slice) => Body::from(slice),
Cow::Owned(vec) => Body::from(vec),
@@ -253,9 +222,7 @@ impl Part {
}
/// Makes a new parameter from an arbitrary stream.
pub fn stream<T: Into<Body>>(value: T) -> Part {
Part::new(value.into(), None)
}
pub fn stream<T: Into<Body>>(value: T) -> Part { Part::new(value.into(), None) }
/// Makes a new parameter from an arbitrary stream with a known length. This is particularly
/// useful when adding something like file contents as a stream, where you can know the content
@@ -273,9 +240,7 @@ impl Part {
#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
pub async fn file<T: AsRef<Path>>(path: T) -> io::Result<Part> {
let path = path.as_ref();
let file_name = path
.file_name()
.map(|filename| filename.to_string_lossy().into_owned());
let file_name = path.file_name().map(|filename| filename.to_string_lossy().into_owned());
let ext = path.extension().and_then(|ext| ext.to_str()).unwrap_or("");
let mime = mime_guess::from_ext(ext).first_or_octet_stream();
let file = File::open(path).await?;
@@ -286,19 +251,11 @@ impl Part {
}
.mime(mime);
Ok(if let Some(file_name) = file_name {
field.file_name(file_name)
} else {
field
})
Ok(if let Some(file_name) = file_name { field.file_name(file_name) } else { field })
}
fn new(value: Body, body_length: Option<u64>) -> Part {
Part {
meta: PartMetadata::new(),
value,
body_length,
}
Part { meta: PartMetadata::new(), value, body_length }
}
/// Tries to set the mime of this part.
@@ -307,15 +264,11 @@ impl Part {
}
// Re-export when mime 0.4 is available, with split MediaType/MediaRange.
fn mime(self, mime: Mime) -> Part {
self.with_inner(move |inner| inner.mime(mime))
}
fn mime(self, mime: Mime) -> Part { self.with_inner(move |inner| inner.mime(mime)) }
/// Sets the filename, builder style.
pub fn file_name<T>(self, filename: T) -> Part
where
T: Into<Cow<'static, str>>,
{
where T: Into<Cow<'static, str>> {
self.with_inner(move |inner| inner.file_name(filename))
}
@@ -325,13 +278,8 @@ impl Part {
}
fn with_inner<F>(self, func: F) -> Self
where
F: FnOnce(PartMetadata) -> PartMetadata,
{
Part {
meta: func(self.meta),
..self
}
where F: FnOnce(PartMetadata) -> PartMetadata {
Part { meta: func(self.meta), ..self }
}
}
@@ -346,16 +294,10 @@ impl fmt::Debug for Part {
impl PartProps for Part {
fn value_len(&self) -> Option<u64> {
if self.body_length.is_some() {
self.body_length
} else {
self.value.content_length()
}
if self.body_length.is_some() { self.body_length } else { self.value.content_length() }
}
fn metadata(&self) -> &PartMetadata {
&self.meta
}
fn metadata(&self) -> &PartMetadata { &self.meta }
}
// ===== impl FormParts =====
@@ -370,15 +312,11 @@ impl<P: PartProps> FormParts<P> {
}
}
pub(crate) fn boundary(&self) -> &str {
&self.boundary
}
pub(crate) fn boundary(&self) -> &str { &self.boundary }
/// Adds a customized Part.
pub(crate) fn part<T>(mut self, name: T, part: P) -> Self
where
T: Into<Cow<'static, str>>,
{
where T: Into<Cow<'static, str>> {
self.fields.push((name.into(), part));
self
}
@@ -455,11 +393,7 @@ impl<P: fmt::Debug> FormParts<P> {
impl PartMetadata {
pub(crate) fn new() -> Self {
PartMetadata {
mime: None,
file_name: None,
headers: HeaderMap::default(),
}
PartMetadata { mime: None, file_name: None, headers: HeaderMap::default() }
}
pub(crate) fn mime(mut self, mime: Mime) -> Self {
@@ -468,17 +402,13 @@ impl PartMetadata {
}
pub(crate) fn file_name<T>(mut self, filename: T) -> Self
where
T: Into<Cow<'static, str>>,
{
where T: Into<Cow<'static, str>> {
self.file_name = Some(filename.into());
self
}
pub(crate) fn headers<T>(mut self, headers: T) -> Self
where
T: Into<HeaderMap>,
{
where T: Into<HeaderMap> {
self.headers = headers.into();
self
}
@@ -497,12 +427,8 @@ impl PartMetadata {
}
// https://url.spec.whatwg.org/#fragment-percent-encode-set
const FRAGMENT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS
.add(b' ')
.add(b'"')
.add(b'<')
.add(b'>')
.add(b'`');
const FRAGMENT_ENCODE_SET: &AsciiSet =
&percent_encoding::CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`');
// https://url.spec.whatwg.org/#path-percent-encode-set
const PATH_ENCODE_SET: &AsciiSet = &FRAGMENT_ENCODE_SET.add(b'#').add(b'?').add(b'{').add(b'}');
@@ -600,20 +526,18 @@ fn gen_boundary() -> String {
#[cfg(test)]
mod tests {
use super::*;
use futures_util::stream;
use futures_util::TryStreamExt;
use std::future;
use futures_util::{TryStreamExt, stream};
use tokio::{self, runtime};
use super::*;
#[test]
fn form_empty() {
let form = Form::new();
let rt = runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("new rt");
let rt = runtime::Builder::new_current_thread().enable_all().build().expect("new rt");
let body = form.stream().into_stream();
let s = body.map_ok(|try_c| try_c.to_vec()).try_concat();
@@ -628,19 +552,15 @@ mod tests {
"reader1",
Part::stream(Body::stream(stream::once(future::ready::<
Result<String, crate::Error>,
>(Ok(
"part1".to_owned()
))))),
>(Ok("part1".to_owned()))))),
)
.part("key1", Part::text("value1"))
.part("key2", Part::text("value2").mime(mime::IMAGE_BMP))
.part("key2", Part::text("value2").mime(mime_guess::mime::IMAGE_BMP))
.part(
"reader2",
Part::stream(Body::stream(stream::once(future::ready::<
Result<String, crate::Error>,
>(Ok(
"part2".to_owned()
))))),
>(Ok("part2".to_owned()))))),
)
.part("key3", Part::text("value3").file_name("filename"));
form.inner.boundary = "boundary".to_string();
@@ -660,26 +580,20 @@ mod tests {
--boundary\r\n\
Content-Disposition: form-data; name=\"key3\"; filename=\"filename\"\r\n\r\n\
value3\r\n--boundary--\r\n";
let rt = runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("new rt");
let rt = runtime::Builder::new_current_thread().enable_all().build().expect("new rt");
let body = form.stream().into_stream();
let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();
let out = rt.block_on(s).unwrap();
// These prints are for debug purposes in case the test fails
println!(
"START REAL\n{}\nEND REAL",
std::str::from_utf8(&out).unwrap()
);
println!("START REAL\n{}\nEND REAL", std::str::from_utf8(&out).unwrap());
println!("START EXPECTED\n{expected}\nEND EXPECTED");
assert_eq!(std::str::from_utf8(&out).unwrap(), expected);
}
#[test]
fn stream_to_end_with_header() {
let mut part = Part::text("value2").mime(mime::IMAGE_BMP);
let mut part = Part::text("value2").mime(mime_guess::mime::IMAGE_BMP);
let mut headers = HeaderMap::new();
headers.insert("Hdr3", "/a/b/c".parse().unwrap());
part = part.headers(headers);
@@ -692,19 +606,13 @@ mod tests {
\r\n\
value2\r\n\
--boundary--\r\n";
let rt = runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("new rt");
let rt = runtime::Builder::new_current_thread().enable_all().build().expect("new rt");
let body = form.stream().into_stream();
let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();
let out = rt.block_on(s).unwrap();
// These prints are for debug purposes in case the test fails
println!(
"START REAL\n{}\nEND REAL",
std::str::from_utf8(&out).unwrap()
);
println!("START REAL\n{}\nEND REAL", std::str::from_utf8(&out).unwrap());
println!("START EXPECTED\n{expected}\nEND EXPECTED");
assert_eq!(std::str::from_utf8(&out).unwrap(), expected);
}
@@ -714,9 +622,7 @@ mod tests {
// Setup an arbitrary data stream
let stream_data = b"just some stream data";
let stream_len = stream_data.len();
let stream_data = stream_data
.chunks(3)
.map(|c| Ok::<_, std::io::Error>(Bytes::from(c)));
let stream_data = stream_data.chunks(3).map(|c| Ok::<_, std::io::Error>(Bytes::from(c)));
let the_stream = futures_util::stream::iter(stream_data);
let bytes_data = b"some bytes data".to_vec();

Some files were not shown because too many files have changed in this diff Show More