diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index fec6cdb87..49dfd0da8 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -11,8 +11,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -env: - RUSTFLAGS: "-D warnings" +permissions: + contents: read jobs: @@ -69,6 +69,8 @@ jobs: strategy: matrix: os: [ubuntu-latest, macOS-latest, windows-latest] + env: + RUSTFLAGS: "-D warnings" steps: - uses: actions/checkout@v4 - uses: hecrj/setup-rust-action@v2 @@ -93,7 +95,7 @@ jobs: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback - name: Get MSRV from manifest file id: msrv - run: echo "version=$(yq '.workspace.package.rust-version' Cargo.toml)" >> $GITHUB_OUTPUT + run: echo "version=$(yq '.workspace.package.rust-version' Cargo.toml)" >> "$GITHUB_OUTPUT" - uses: hecrj/setup-rust-action@v2 with: rust-version: ${{ steps.msrv.outputs.version }} @@ -154,6 +156,7 @@ jobs: - uses: obi1kenobi/cargo-semver-checks-action@v2 with: feature-group: all-features + exclude: grpc external-types: runs-on: ubuntu-latest @@ -161,11 +164,13 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2024-06-30 + toolchain: nightly-2025-05-04 - name: Install cargo-check-external-types uses: taiki-e/cache-cargo-install-action@v2 with: - tool: cargo-check-external-types@0.1.13 + tool: cargo-check-external-types@0.2.0 - uses: taiki-e/install-action@cargo-hack - uses: Swatinem/rust-cache@v2 - - run: cargo hack --no-private check-external-types + - run: cargo hack --no-private check-external-types --all-features + env: + RUSTFLAGS: "-D warnings" diff --git a/Cargo.toml b/Cargo.toml index 351e1824f..ce9bc4d43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,5 +41,8 @@ missing_docs = "warn" rust_2018_idioms = "warn" unreachable_pub = "warn" +[workspace.lints.clippy] +uninlined_format_args = "deny" + [workspace.lints.rustdoc] broken_intra_doc_links = "deny" diff --git 
a/codegen/Cargo.toml b/codegen/Cargo.toml index 54c2b83b4..b4994a5e8 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -5,7 +5,7 @@ license = "MIT" edition = "2021" [dependencies] -protox = "0.8" +protox = "0.9" prettyplease = "0.2" quote = "1" syn = "2" diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 5282863d8..24e71261a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -279,7 +279,7 @@ default = ["full"] [dependencies] # Common dependencies tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] } -prost = "0.13" +prost = "0.14" tonic = { path = "../tonic" } # Optional dependencies tonic-web = { path = "../tonic-web", optional = true } @@ -295,9 +295,8 @@ serde = { version = "1.0", features = ["derive"], optional = true } serde_json = { version = "1.0", optional = true } tracing = { version = "0.1.16", optional = true } tracing-subscriber = { version = "0.3", features = ["tracing-log", "fmt"], optional = true } -prost-types = { version = "0.13", optional = true } +prost-types = { version = "0.14", optional = true } http = { version = "1", optional = true } -http-body = { version = "1", optional = true } hyper = { version = "1", optional = true } hyper-util = { version = "0.1.4", optional = true } listenfd = { version = "1.0", optional = true } diff --git a/examples/build.rs b/examples/build.rs index c17ad958b..84b9e2c6c 100644 --- a/examples/build.rs +++ b/examples/build.rs @@ -2,7 +2,6 @@ use std::{env, path::PathBuf}; fn main() { tonic_build::configure() - .type_attribute("routeguide.Point", "#[derive(Hash)]") .compile_protos(&["proto/routeguide/route_guide.proto"], &["proto"]) .unwrap(); diff --git a/examples/helloworld-tutorial.md b/examples/helloworld-tutorial.md index 2e910a9ca..972509a5f 100644 --- a/examples/helloworld-tutorial.md +++ b/examples/helloworld-tutorial.md @@ -113,7 +113,7 @@ path = "src/client.rs" [dependencies] tonic = "*" -prost = "0.13" +prost = "0.14" tokio = { version = "1.0", features = 
["macros", "rt-multi-thread"] } [build-dependencies] diff --git a/examples/routeguide-tutorial.md b/examples/routeguide-tutorial.md index c434ff8b7..9e821fbc4 100644 --- a/examples/routeguide-tutorial.md +++ b/examples/routeguide-tutorial.md @@ -175,7 +175,7 @@ Edit `Cargo.toml` and add all the dependencies we'll need for this example: ```toml [dependencies] tonic = "*" -prost = "0.13" +prost = "0.14" tokio = { version = "1.0", features = ["rt-multi-thread", "macros", "sync", "time"] } tokio-stream = "0.1" @@ -335,27 +335,6 @@ the corresponding `data` module to load and deserialize it in **Note:** If you are following along, you'll need to change the data file's path from `examples/data/route_guide_db.json` to `data/route_guide_db.json`. -Next, we need to implement `Hash` and `Eq` for `Point`, so we can use point values as map keys: - -```rust -use std::hash::{Hasher, Hash}; -``` - -```rust -impl Hash for Point { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.latitude.hash(state); - self.longitude.hash(state); - } -} - -impl Eq for Point {} - -``` - Lastly, we need implement two helper functions: `in_range` and `calc_distance`. We'll use them when performing feature lookups. You can find them in [examples/src/routeguide/server.rs][in-range-fn]. 
diff --git a/examples/src/authentication/client.rs b/examples/src/authentication/client.rs index 2f0a17cad..8c0b8100b 100644 --- a/examples/src/authentication/client.rs +++ b/examples/src/authentication/client.rs @@ -22,7 +22,7 @@ async fn main() -> Result<(), Box> { let response = client.unary_echo(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/autoreload/server.rs b/examples/src/autoreload/server.rs index a181f75ec..f8794314f 100644 --- a/examples/src/autoreload/server.rs +++ b/examples/src/autoreload/server.rs @@ -30,7 +30,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); let server = Server::builder().add_service(GreeterServer::new(greeter)); diff --git a/examples/src/blocking/client.rs b/examples/src/blocking/client.rs index 3feb73985..5f512a64a 100644 --- a/examples/src/blocking/client.rs +++ b/examples/src/blocking/client.rs @@ -47,7 +47,7 @@ fn main() -> Result<()> { let response = client.say_hello(request)?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/cancellation/client.rs b/examples/src/cancellation/client.rs index 7e9a92772..49ad3d195 100644 --- a/examples/src/cancellation/client.rs +++ b/examples/src/cancellation/client.rs @@ -24,7 +24,7 @@ async fn main() -> Result<(), Box> { } }; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/cancellation/server.rs b/examples/src/cancellation/server.rs index 32d93e7d7..18f92ef2d 100644 --- a/examples/src/cancellation/server.rs +++ b/examples/src/cancellation/server.rs @@ -37,7 +37,7 @@ impl Greeter for MyGreeter { Ok(Response::new(reply)) }; let cancellation_future = async move { - println!("Request from {:?} cancelled by client", 
remote_addr); + println!("Request from {remote_addr:?} cancelled by client"); // If this future is executed it means the request future was dropped, // so it doesn't actually matter what is returned here Err(Status::cancelled("Request cancelled by client")) @@ -74,7 +74,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/codec_buffers/client.rs b/examples/src/codec_buffers/client.rs index 267e19dbf..9f912c2b0 100644 --- a/examples/src/codec_buffers/client.rs +++ b/examples/src/codec_buffers/client.rs @@ -24,7 +24,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/codec_buffers/server.rs b/examples/src/codec_buffers/server.rs index b30c797d3..433fa0cf7 100644 --- a/examples/src/codec_buffers/server.rs +++ b/examples/src/codec_buffers/server.rs @@ -40,7 +40,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/compression/server.rs b/examples/src/compression/server.rs index fd035d218..f106c3d74 100644 --- a/examples/src/compression/server.rs +++ b/examples/src/compression/server.rs @@ -31,7 +31,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); let service = GreeterServer::new(greeter) .send_compressed(CompressionEncoding::Gzip) diff --git 
a/examples/src/dynamic/server.rs b/examples/src/dynamic/server.rs index 14e0bee3e..1e31f18f3 100644 --- a/examples/src/dynamic/server.rs +++ b/examples/src/dynamic/server.rs @@ -77,7 +77,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); - println!("Grpc server listening on {}", addr); + println!("Grpc server listening on {addr}"); Server::builder() .add_routes(routes_builder.routes()) diff --git a/examples/src/dynamic_load_balance/client.rs b/examples/src/dynamic_load_balance/client.rs index 1f7ad0949..17f180d3a 100644 --- a/examples/src/dynamic_load_balance/client.rs +++ b/examples/src/dynamic_load_balance/client.rs @@ -27,36 +27,36 @@ async fn main() -> Result<(), Box> { println!("Added first endpoint"); let change = Change::Insert("1", e1); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; println!("Added second endpoint"); let change = Change::Insert("2", e2); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; println!("Removed first endpoint"); let change = Change::Remove("1"); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; println!("Removed second endpoint"); let change = Change::Remove("2"); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; println!("Added third endpoint"); let e3 = Endpoint::from_static("http://[::1]:50051"); let change = Change::Insert("3", e3); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; println!("Removed third endpoint"); let change = Change::Remove("3"); let res = rx.send(change).await; - println!("{:?}", res); + println!("{res:?}"); 
demo_done.swap(true, SeqCst); }); @@ -68,7 +68,7 @@ async fn main() -> Result<(), Box> { let rx = client.unary_echo(request); if let Ok(resp) = timeout(tokio::time::Duration::from_secs(10), rx).await { - println!("RESPONSE={:?}", resp); + println!("RESPONSE={resp:?}"); } else { println!("did not receive value within 10 secs"); } diff --git a/examples/src/dynamic_load_balance/server.rs b/examples/src/dynamic_load_balance/server.rs index 7935e1cf5..1143ca57f 100644 --- a/examples/src/dynamic_load_balance/server.rs +++ b/examples/src/dynamic_load_balance/server.rs @@ -41,7 +41,7 @@ async fn main() -> Result<(), Box> { tokio::spawn(async move { if let Err(e) = serve.await { - eprintln!("Error = {:?}", e); + eprintln!("Error = {e:?}"); } tx.send(()).unwrap(); diff --git a/examples/src/gcp/client.rs b/examples/src/gcp/client.rs index 7f30db592..3d66f8695 100644 --- a/examples/src/gcp/client.rs +++ b/examples/src/gcp/client.rs @@ -21,7 +21,7 @@ async fn main() -> Result<(), Box> { .nth(1) .ok_or_else(|| "Expected a project name as the first argument.".to_string())?; - let bearer_token = format!("Bearer {}", token); + let bearer_token = format!("Bearer {token}"); let header_value: MetadataValue<_> = bearer_token.parse()?; let data_dir = std::path::PathBuf::from_iter([std::env!("CARGO_MANIFEST_DIR"), "data"]); @@ -44,13 +44,13 @@ async fn main() -> Result<(), Box> { let response = service .list_topics(Request::new(ListTopicsRequest { - project: format!("projects/{0}", project), + project: format!("projects/{project}"), page_size: 10, ..Default::default() })) .await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/grpc-web/client.rs b/examples/src/grpc-web/client.rs index fa64dd506..3d14c9665 100644 --- a/examples/src/grpc-web/client.rs +++ b/examples/src/grpc-web/client.rs @@ -23,7 +23,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", 
response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/grpc-web/server.rs b/examples/src/grpc-web/server.rs index 9fb1a188d..0da7d0f7a 100644 --- a/examples/src/grpc-web/server.rs +++ b/examples/src/grpc-web/server.rs @@ -38,7 +38,7 @@ async fn main() -> Result<(), Box> { .into_inner() .named_layer(GreeterServer::new(greeter)); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() // GrpcWeb is over http1 so we must enable it. diff --git a/examples/src/h2c/client.rs b/examples/src/h2c/client.rs index 240a874cb..5ecac52ea 100644 --- a/examples/src/h2c/client.rs +++ b/examples/src/h2c/client.rs @@ -23,7 +23,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/h2c/server.rs b/examples/src/h2c/server.rs index 866068a27..3fb97797a 100644 --- a/examples/src/h2c/server.rs +++ b/examples/src/h2c/server.rs @@ -36,7 +36,7 @@ async fn main() -> Result<(), Box> { let addr: SocketAddr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); let incoming = TcpListener::bind(addr).await?; let svc = Routes::new(GreeterServer::new(greeter)).prepare(); @@ -57,7 +57,7 @@ async fn main() -> Result<(), Box> { }); } Err(e) => { - eprintln!("Error accepting connection: {}", e); + eprintln!("Error accepting connection: {e}"); } } } diff --git a/examples/src/health/server.rs b/examples/src/health/server.rs index 4ae198d51..bd425740a 100644 --- a/examples/src/health/server.rs +++ b/examples/src/health/server.rs @@ -55,7 +55,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("HealthServer + GreeterServer listening on {}", addr); + println!("HealthServer + 
GreeterServer listening on {addr}"); Server::builder() .add_service(health_service) diff --git a/examples/src/helloworld/client.rs b/examples/src/helloworld/client.rs index 6e6678308..8a443b105 100644 --- a/examples/src/helloworld/client.rs +++ b/examples/src/helloworld/client.rs @@ -15,7 +15,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/helloworld/server.rs b/examples/src/helloworld/server.rs index c6398bb61..95b74168a 100644 --- a/examples/src/helloworld/server.rs +++ b/examples/src/helloworld/server.rs @@ -30,7 +30,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/interceptor/client.rs b/examples/src/interceptor/client.rs index 6ef183b12..bac4a6acb 100644 --- a/examples/src/interceptor/client.rs +++ b/examples/src/interceptor/client.rs @@ -25,7 +25,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } @@ -34,7 +34,7 @@ async fn main() -> Result<(), Box> { /// `Status` here will cancel the request and have that status returned to /// the caller. fn intercept(req: Request<()>) -> Result, Status> { - println!("Intercepting request: {:?}", req); + println!("Intercepting request: {req:?}"); Ok(req) } diff --git a/examples/src/interceptor/server.rs b/examples/src/interceptor/server.rs index fd0cf462f..0061442ef 100644 --- a/examples/src/interceptor/server.rs +++ b/examples/src/interceptor/server.rs @@ -36,7 +36,7 @@ async fn main() -> Result<(), Box> { // structs. 
let svc = GreeterServer::with_interceptor(greeter, intercept); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder().add_service(svc).serve(addr).await?; @@ -47,7 +47,7 @@ async fn main() -> Result<(), Box> { /// is returned, it will cancel the request and return that status to the /// client. fn intercept(mut req: Request<()>) -> Result, Status> { - println!("Intercepting request: {:?}", req); + println!("Intercepting request: {req:?}"); // Set an extension that can be retrieved by `say_hello` req.extensions_mut().insert(MyExtension { diff --git a/examples/src/json-codec/client.rs b/examples/src/json-codec/client.rs index dd6305ef6..f5363ea3f 100644 --- a/examples/src/json-codec/client.rs +++ b/examples/src/json-codec/client.rs @@ -22,7 +22,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/json-codec/server.rs b/examples/src/json-codec/server.rs index 1029b0bf9..319949091 100644 --- a/examples/src/json-codec/server.rs +++ b/examples/src/json-codec/server.rs @@ -37,7 +37,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/load_balance/client.rs b/examples/src/load_balance/client.rs index 8bb28061b..d4f21a659 100644 --- a/examples/src/load_balance/client.rs +++ b/examples/src/load_balance/client.rs @@ -22,7 +22,7 @@ async fn main() -> Result<(), Box> { let response = client.unary_echo(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); } Ok(()) diff --git a/examples/src/load_balance/server.rs b/examples/src/load_balance/server.rs index 7935e1cf5..1143ca57f 100644 
--- a/examples/src/load_balance/server.rs +++ b/examples/src/load_balance/server.rs @@ -41,7 +41,7 @@ async fn main() -> Result<(), Box> { tokio::spawn(async move { if let Err(e) = serve.await { - eprintln!("Error = {:?}", e); + eprintln!("Error = {e:?}"); } tx.send(()).unwrap(); diff --git a/examples/src/mock/mock.rs b/examples/src/mock/mock.rs index fd6be0482..ae943a419 100644 --- a/examples/src/mock/mock.rs +++ b/examples/src/mock/mock.rs @@ -53,7 +53,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/multiplex/client.rs b/examples/src/multiplex/client.rs index c6b25cad3..c598afe2a 100644 --- a/examples/src/multiplex/client.rs +++ b/examples/src/multiplex/client.rs @@ -25,7 +25,7 @@ async fn main() -> Result<(), Box> { let response = greeter_client.say_hello(request).await?; - println!("GREETER RESPONSE={:?}", response); + println!("GREETER RESPONSE={response:?}"); let request = tonic::Request::new(EchoRequest { message: "hello".into(), @@ -33,7 +33,7 @@ async fn main() -> Result<(), Box> { let response = echo_client.unary_echo(request).await?; - println!("ECHO RESPONSE={:?}", response); + println!("ECHO RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/richer-error/client.rs b/examples/src/richer-error/client.rs index 3ccf35428..25167a7b0 100644 --- a/examples/src/richer-error/client.rs +++ b/examples/src/richer-error/client.rs @@ -29,15 +29,15 @@ async fn main() -> Result<(), Box> { if let Some(bad_request) = err_details.bad_request() { // Handle bad_request details - println!(" {:?}", bad_request); + println!(" {bad_request:?}"); } if let Some(help) = err_details.help() { // Handle help details - println!(" {:?}", help); + println!(" {help:?}"); } if let Some(localized_message) = err_details.localized_message() { // Handle localized_message details - println!(" {:?}", localized_message); + println!(" 
{localized_message:?}"); } println!(); @@ -46,7 +46,7 @@ async fn main() -> Result<(), Box> { } }; - println!(" Successful response received.\n\n {:?}\n", response); + println!(" Successful response received.\n\n {response:?}\n"); Ok(()) } diff --git a/examples/src/richer-error/client_vec.rs b/examples/src/richer-error/client_vec.rs index c852c61a4..973bef008 100644 --- a/examples/src/richer-error/client_vec.rs +++ b/examples/src/richer-error/client_vec.rs @@ -32,15 +32,15 @@ async fn main() -> Result<(), Box> { match err_detail { ErrorDetail::BadRequest(bad_request) => { // Handle bad_request details - println!(" {:?}", bad_request); + println!(" {bad_request:?}"); } ErrorDetail::Help(help) => { // Handle help details - println!(" {:?}", help); + println!(" {help:?}"); } ErrorDetail::LocalizedMessage(localized_message) => { // Handle localized_message details - println!(" {:?}", localized_message); + println!(" {localized_message:?}"); } _ => {} } @@ -52,7 +52,7 @@ async fn main() -> Result<(), Box> { } }; - println!(" Successful response received.\n\n {:?}\n", response); + println!(" Successful response received.\n\n {response:?}\n"); Ok(()) } diff --git a/examples/src/richer-error/server.rs b/examples/src/richer-error/server.rs index 235f8c157..9d390c0ae 100644 --- a/examples/src/richer-error/server.rs +++ b/examples/src/richer-error/server.rs @@ -49,7 +49,7 @@ impl Greeter for MyGreeter { } let reply = hello_world::HelloReply { - message: format!("Hello {}!", name), + message: format!("Hello {name}!"), }; Ok(Response::new(reply)) } @@ -60,7 +60,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/richer-error/server_vec.rs b/examples/src/richer-error/server_vec.rs index 80093e55f..f988a117f 100644 --- 
a/examples/src/richer-error/server_vec.rs +++ b/examples/src/richer-error/server_vec.rs @@ -49,7 +49,7 @@ impl Greeter for MyGreeter { } let reply = hello_world::HelloReply { - message: format!("Hello {}!", name), + message: format!("Hello {name}!"), }; Ok(Response::new(reply)) } @@ -60,7 +60,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); Server::builder() .add_service(GreeterServer::new(greeter)) diff --git a/examples/src/routeguide/client.rs b/examples/src/routeguide/client.rs index 9de1477e6..231805d00 100644 --- a/examples/src/routeguide/client.rs +++ b/examples/src/routeguide/client.rs @@ -32,7 +32,7 @@ async fn print_features(client: &mut RouteGuideClient) -> Result<(), Bo .into_inner(); while let Some(feature) = stream.message().await? { - println!("FEATURE = {:?}", feature); + println!("FEATURE = {feature:?}"); } Ok(()) @@ -52,7 +52,7 @@ async fn run_record_route(client: &mut RouteGuideClient) -> Result<(), match client.record_route(request).await { Ok(response) => println!("SUMMARY: {:?}", response.into_inner()), - Err(e) => println!("something went wrong: {:?}", e), + Err(e) => println!("something went wrong: {e:?}"), } Ok(()) @@ -72,7 +72,7 @@ async fn run_route_chat(client: &mut RouteGuideClient) -> Result<(), Bo latitude: 409146138 + elapsed.as_secs() as i32, longitude: -746188906, }), - message: format!("at {:?}", elapsed), + message: format!("at {elapsed:?}"), }; yield note; @@ -83,7 +83,7 @@ async fn run_route_chat(client: &mut RouteGuideClient) -> Result<(), Bo let mut inbound = response.into_inner(); while let Some(note) = inbound.message().await? 
{ - println!("NOTE = {:?}", note); + println!("NOTE = {note:?}"); } Ok(()) @@ -100,7 +100,7 @@ async fn main() -> Result<(), Box> { longitude: -746_188_906, })) .await?; - println!("RESPONSE = {:?}", response); + println!("RESPONSE = {response:?}"); println!("\n*** SERVER STREAMING ***"); print_features(&mut client).await?; diff --git a/examples/src/routeguide/server.rs b/examples/src/routeguide/server.rs index 93d456746..5c490e51e 100644 --- a/examples/src/routeguide/server.rs +++ b/examples/src/routeguide/server.rs @@ -50,7 +50,7 @@ impl RouteGuide for RouteGuideService { tokio::spawn(async move { for feature in &features[..] { if in_range(feature.location.as_ref().unwrap(), request.get_ref()) { - println!(" => send {:?}", feature); + println!(" => send {feature:?}"); tx.send(Ok(feature.clone())).await.unwrap(); } } @@ -76,7 +76,7 @@ impl RouteGuide for RouteGuideService { while let Some(point) = stream.next().await { let point = point?; - println!(" ==> Point = {:?}", point); + println!(" ==> Point = {point:?}"); // Increment the point count summary.point_count += 1; @@ -135,7 +135,7 @@ impl RouteGuide for RouteGuideService { async fn main() -> Result<(), Box> { let addr = "[::1]:10000".parse().unwrap(); - println!("RouteGuideServer listening on: {}", addr); + println!("RouteGuideServer listening on: {addr}"); let route_guide = RouteGuideService { features: Arc::new(data::load()), @@ -148,8 +148,6 @@ async fn main() -> Result<(), Box> { Ok(()) } -impl Eq for Point {} - fn in_range(point: &Point, rect: &Rectangle) -> bool { use std::cmp; diff --git a/examples/src/streaming/client.rs b/examples/src/streaming/client.rs index 546f244ff..a7568fca0 100644 --- a/examples/src/streaming/client.rs +++ b/examples/src/streaming/client.rs @@ -10,7 +10,7 @@ use pb::{echo_client::EchoClient, EchoRequest}; fn echo_requests_iter() -> impl Stream { tokio_stream::iter(1..usize::MAX).map(|i| EchoRequest { - message: format!("msg {:02}", i), + message: format!("msg {i:02}"), }) } 
diff --git a/examples/src/tls/client.rs b/examples/src/tls/client.rs index 8c98547f2..598b34a17 100644 --- a/examples/src/tls/client.rs +++ b/examples/src/tls/client.rs @@ -27,7 +27,7 @@ async fn main() -> Result<(), Box> { let response = client.unary_echo(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/tls_client_auth/client.rs b/examples/src/tls_client_auth/client.rs index 6670c67f5..32275a557 100644 --- a/examples/src/tls_client_auth/client.rs +++ b/examples/src/tls_client_auth/client.rs @@ -32,7 +32,7 @@ async fn main() -> Result<(), Box> { let response = client.unary_echo(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/tls_rustls/client.rs b/examples/src/tls_rustls/client.rs index 6aaa094ac..3db72ef9e 100644 --- a/examples/src/tls_rustls/client.rs +++ b/examples/src/tls_rustls/client.rs @@ -63,7 +63,7 @@ async fn main() -> Result<(), Box> { let response = client.unary_echo(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/tls_rustls/server.rs b/examples/src/tls_rustls/server.rs index 62045da02..26fe818dd 100644 --- a/examples/src/tls_rustls/server.rs +++ b/examples/src/tls_rustls/server.rs @@ -53,7 +53,7 @@ async fn main() -> Result<(), Box> { let (conn, addr) = match listener.accept().await { Ok(incoming) => incoming, Err(e) => { - eprintln!("Error accepting connection: {}", e); + eprintln!("Error accepting connection: {e}"); continue; } }; diff --git a/examples/src/tower/client.rs b/examples/src/tower/client.rs index 86a539aa6..dadaeedf3 100644 --- a/examples/src/tower/client.rs +++ b/examples/src/tower/client.rs @@ -27,14 +27,14 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } // An interceptor 
function. fn intercept(req: Request<()>) -> Result, Status> { - println!("received {:?}", req); + println!("received {req:?}"); Ok(req) } diff --git a/examples/src/tower/server.rs b/examples/src/tower/server.rs index 591e17671..9c891c502 100644 --- a/examples/src/tower/server.rs +++ b/examples/src/tower/server.rs @@ -35,7 +35,7 @@ async fn main() -> Result<(), Box> { let addr = "[::1]:50051".parse().unwrap(); let greeter = MyGreeter::default(); - println!("GreeterServer listening on {}", addr); + println!("GreeterServer listening on {addr}"); let svc = GreeterServer::new(greeter); diff --git a/examples/src/uds/client_standard.rs b/examples/src/uds/client_standard.rs index 264d41cfc..524d14940 100644 --- a/examples/src/uds/client_standard.rs +++ b/examples/src/uds/client_standard.rs @@ -23,7 +23,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/uds/client_with_connector.rs b/examples/src/uds/client_with_connector.rs index 9a09e6981..34f1282c7 100644 --- a/examples/src/uds/client_with_connector.rs +++ b/examples/src/uds/client_with_connector.rs @@ -35,7 +35,7 @@ async fn main() -> Result<(), Box> { let response = client.say_hello(request).await?; - println!("RESPONSE={:?}", response); + println!("RESPONSE={response:?}"); Ok(()) } diff --git a/examples/src/uds/server.rs b/examples/src/uds/server.rs index ccf9c91a8..d481d4104 100644 --- a/examples/src/uds/server.rs +++ b/examples/src/uds/server.rs @@ -30,7 +30,7 @@ impl Greeter for MyGreeter { #[cfg(unix)] { let conn_info = request.extensions().get::().unwrap(); - println!("Got a request {:?} with info {:?}", request, conn_info); + println!("Got a request {request:?} with info {conn_info:?}"); } let reply = hello_world::HelloReply { diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..f257c3ba2 --- /dev/null +++ b/flake.lock @@ -0,0 +1,181 @@ +{ + 
"nodes": { + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1746081462, + "narHash": "sha256-WmJBaktb33WwqNn5BwdJghAoiBnvnPhgHSBksTrF5K8=", + "owner": "nix-community", + "repo": "fenix", + "rev": "e3be528e4f03538852ba49b413ec4ac843edeb60", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1743550720, + "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "c621e8422220273271f52058f618c94e405bb0f5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1742649964, + "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, 
+ "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1745930157, + "narHash": "sha256-y3h3NLnzRSiUkYpnfvnS669zWZLoqqI6NprtLQ+5dck=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "46e634be05ce9dc6d4db8e664515ba10b78151ae", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1743296961, + "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "root": { + "inputs": { + "fenix": "fenix", + "flake-parts": "flake-parts", + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs", + "treefmt-nix": "treefmt-nix" + } + }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1746024678, + "narHash": "sha256-Q5J7+RoTPH4zPeu0Ka7iSXtXty228zKjS0Ed4R+ohpA=", + "owner": "rust-lang", + "repo": "rust-analyzer", + "rev": "5d66d45005fef79751294419ab9a9fa304dfdf5c", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, + "treefmt-nix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1745929750, + "narHash": "sha256-k5ELLpTwRP/OElcLpNaFWLNf8GRDq4/eHBmFy06gGko=", + "owner": "numtide", + "repo": "treefmt-nix", + "rev": "82bf32e541b30080d94e46af13d46da0708609ea", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "treefmt-nix", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..790151849 --- /dev/null +++ b/flake.nix @@ -0,0 +1,73 @@ +{ + description = "Description for the project"; + + 
inputs = { + flake-parts.url = "github:hercules-ci/flake-parts"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + git-hooks = { + url = "github:cachix/git-hooks.nix"; + inputs = { nixpkgs.follows = "nixpkgs"; }; + }; + + treefmt-nix = { + url = "github:numtide/treefmt-nix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = inputs@{ self, flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ inputs.git-hooks.flakeModule inputs.treefmt-nix.flakeModule ]; + systems = + [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" "x86_64-darwin" ]; + perSystem = { config, pkgs, system, ... }: + let rustToolchain = pkgs.fenix.stable; + in { + _module.args.pkgs = import inputs.nixpkgs { + inherit system; + overlays = [ inputs.fenix.overlays.default ]; + config = { }; + }; + + formatter = config.treefmt.build.wrapper; + checks.formatting = config.treefmt.build.check self; + + pre-commit = { + check.enable = true; + settings.hooks = { + actionlint.enable = true; + shellcheck.enable = true; + treefmt.enable = true; + }; + }; + + treefmt = { + settings = { rustfmt.enable = true; }; + projectRootFile = ".git/config"; + flakeCheck = false; # Covered by git-hooks check + }; + + devShells.default = pkgs.mkShell { + packages = with pkgs; [ + nixd + nixfmt + + (rustToolchain.withComponents [ + "cargo" + "clippy" + "rust-src" + "rustc" + "rustfmt" + "rust-analyzer" + ]) + protobuf + ]; + }; + }; + }; +} diff --git a/grpc/Cargo.toml b/grpc/Cargo.toml index b4776fbf9..7c9033ac2 100644 --- a/grpc/Cargo.toml +++ b/grpc/Cargo.toml @@ -3,44 +3,50 @@ name = "grpc" version = "0.9.0-alpha.1" edition = "2021" authors = ["gRPC Authors"] -license = "Apache-2.0" +license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +bytes = "1.10.1" futures-core = "0.3.31" futures-util = "0.3.31" 
+hickory-resolver = { version = "0.25.1", optional = true } http = "1.1.0" http-body = "1.0.1" +hyper = { version = "1.6.0", features = ["client", "http2"] } +hyper-util = "0.1.14" once_cell = "1.19.0" -rand = "0.8.5" -serde = { version = "1.0.217", features = ["derive"] } -serde_json = "1.0.135" -tokio = { version = "1.37.0", features = ["sync", "full"] } +parking_lot = "0.12.4" +pin-project-lite = "0.2.16" +protobuf = { version = "4.31.1-release" } +rand = "0.9" +serde = { version = "1.0.219", features = ["derive"] } +serde_json = "1.0.140" +socket2 = "0.5.10" +tokio = { version = "1.37.0", features = ["sync", "rt", "net", "time", "macros"] } tokio-stream = "0.1.17" -tonic = { version = "0.13.0", path = "../tonic", default-features = false, features = ["codegen", "transport"] } +tonic = { version = "0.14.0", path = "../tonic", default-features = false, features = ["codegen", "transport"] } +tower = "0.5.2" tower-service = "0.3.3" url = "2.5.0" -hickory-resolver = { version = "0.25.1", optional = true } -hyper-util = "0.1.14" -hyper = { version = "1.6.0", features = ["client", "http2"] } -tower = "0.5.2" -socket2 = "0.5.10" -pin-project-lite = "0.2.16" -protobuf = { version = "4.31.1-release" } -bytes = "1.10.1" + +[build-dependencies] +protobuf-codegen = { version = "4.31.1-release", optional = true } [dev-dependencies] async-stream = "0.3.6" -tonic = { version = "0.13.0", path = "../tonic", default-features = false, features = ["prost", "server", "router"] } +tonic = { version = "0.14.0", path = "../tonic", default-features = false, features = ["prost", "server", "router"] } hickory-server = "0.25.2" prost = "0.13.5" [features] -default = ["hickory_dns"] -hickory_dns = ["dep:hickory-resolver"] +default = ["dns"] +dns = ["dep:hickory-resolver"] test-data = ["dep:protobuf-codegen"] - -[build-dependencies] -protobuf-codegen = { version = "4.31.1-release", optional = true } +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "tonic::*", + 
"futures_core::stream::Stream", +] diff --git a/grpc/LICENSE b/grpc/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/grpc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/grpc/NOTICE.txt b/grpc/NOTICE.txt deleted file mode 100644 index 88316812f..000000000 --- a/grpc/NOTICE.txt +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2025 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/grpc/examples/inmemory.rs b/grpc/examples/inmemory.rs index 1028232c1..abcbf7d8b 100644 --- a/grpc/examples/inmemory.rs +++ b/grpc/examples/inmemory.rs @@ -1,7 +1,6 @@ use std::any::Any; use futures_util::stream::StreamExt; -use grpc::client::load_balancing::pick_first; use grpc::client::transport; use grpc::service::{Message, Request, Response, Service}; use grpc::{client::ChannelOptions, inmemory}; @@ -38,7 +37,6 @@ impl Service for Handler { #[tokio::main] async fn main() { inmemory::reg(); - pick_first::reg(); // Spawn the server. let lis = inmemory::Listener::new(); @@ -53,7 +51,7 @@ async fn main() { println!("Creating channel for {}", lis.target()); let chan_opts = ChannelOptions::default().transport_registry(transport::GLOBAL_TRANSPORT_REGISTRY.clone()); - let chan = grpc::client::Channel::new(lis.target().as_str(), None, None, chan_opts); + let chan = grpc::client::Channel::new(lis.target().as_str(), None, chan_opts); let outbound = async_stream::stream! { yield Box::new(MyReqMessage("My Request 1".to_string())) as Box; diff --git a/grpc/examples/multiaddr.rs b/grpc/examples/multiaddr.rs index 51524a6c6..5c03cfd22 100644 --- a/grpc/examples/multiaddr.rs +++ b/grpc/examples/multiaddr.rs @@ -1,7 +1,6 @@ use std::any::Any; use futures_util::StreamExt; -use grpc::client::load_balancing::pick_first; use grpc::client::transport; use grpc::service::{Message, Request, Response, Service}; use grpc::{client::ChannelOptions, inmemory}; @@ -41,7 +40,6 @@ impl Service for Handler { #[tokio::main] async fn main() { inmemory::reg(); - pick_first::reg(); // Spawn the first server. 
let lis1 = inmemory::Listener::new(); @@ -77,7 +75,7 @@ async fn main() { println!("Creating channel for {target}"); let chan_opts = ChannelOptions::default().transport_registry(transport::GLOBAL_TRANSPORT_REGISTRY.clone()); - let chan = grpc::client::Channel::new(target.as_str(), None, None, chan_opts); + let chan = grpc::client::Channel::new(target.as_str(), None, chan_opts); let outbound = async_stream::stream! { yield Box::new(MyReqMessage("My Request 1".to_string())) as Box; diff --git a/grpc/src/attributes.rs b/grpc/src/attributes.rs index 8e6398d4c..7c61eda42 100644 --- a/grpc/src/attributes.rs +++ b/grpc/src/attributes.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ diff --git a/grpc/src/byte_str.rs b/grpc/src/byte_str.rs new file mode 100644 index 000000000..971d3587c --- /dev/null +++ b/grpc/src/byte_str.rs @@ -0,0 +1,57 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +use core::str; +use std::ops::Deref; + +use bytes::Bytes; + +/// A cheaply cloneable and sliceable chunk of contiguous memory. 
+#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct ByteStr { + // Invariant: bytes contains valid UTF-8 + bytes: Bytes, +} + +impl Deref for ByteStr { + type Target = str; + + #[inline] + fn deref(&self) -> &str { + let b: &[u8] = self.bytes.as_ref(); + // The invariant of `bytes` is that it contains valid UTF-8 allows us + // to unwrap. + str::from_utf8(b).unwrap() + } +} + +impl From for ByteStr { + #[inline] + fn from(src: String) -> ByteStr { + ByteStr { + // Invariant: src is a String so contains valid UTF-8. + bytes: Bytes::from(src), + } + } +} diff --git a/grpc/src/client/channel.rs b/grpc/src/client/channel.rs index 5c4b0a9fa..e41d32130 100644 --- a/grpc/src/client/channel.rs +++ b/grpc/src/client/channel.rs @@ -1,3 +1,27 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + use core::panic; use std::{ any::Any, @@ -40,8 +64,7 @@ use super::{ }; use super::{ name_resolution::{ - self, Address, ResolverBuilder, ResolverOptions, ResolverRegistry, ResolverUpdate, - GLOBAL_RESOLVER_REGISTRY, + self, global_registry, Address, ResolverBuilder, ResolverOptions, ResolverUpdate, }, subchannel, }; @@ -59,7 +82,7 @@ pub struct ChannelOptions { pub idle_timeout: Duration, pub transport_registry: Option, pub name_resolver_registry: Option, - pub lb_policy_registry: Option, + // TODO: pub lb_policy_registry: Option, // Typically we allow settings at the channel level that impact all RPCs, // but can also be set per-RPC. E.g.s: @@ -93,7 +116,6 @@ impl Default for ChannelOptions { max_retry_memory: 8 * 1024 * 1024, // 8MB -- ??? idle_timeout: Duration::from_secs(30 * 60), name_resolver_registry: None, - lb_policy_registry: None, default_request_extensions: vec![], transport_registry: None, } @@ -127,21 +149,27 @@ pub struct Channel { } impl Channel { + /// Constructs a new gRPC channel. A gRPC channel is a virtual, persistent + /// connection to a service. Channel creation cannot fail, but if the + /// target string is invalid, the returned channel will never connect, and + /// will fail all RPCs. + // TODO: should this return a Result instead? pub fn new( target: &str, credentials: Option>, - runtime: Option>, options: ChannelOptions, ) -> Self { + pick_first::reg(); Self { inner: Arc::new(PersistentChannel::new( target, credentials, - Arc::from(runtime.unwrap_or(Box::new(rt::tokio::TokioRuntime {}))), + Arc::new(rt::tokio::TokioRuntime {}), options, )), } } + // Waits until all outstanding RPCs are completed, then stops the client // (via "drop"? no, that makes no sense). Note that there probably needs to // be a way to add a timeout here or for the application to do a hard @@ -205,6 +233,10 @@ impl Channel { } } +// A PersistentChannel represents the static configuration of a channel and an +// optional Arc of an ActiveChannel. 
An ActiveChannel exists whenever the +// PersistentChannel is not IDLE. Every channel is IDLE at creation, or after +// some configurable timeout elapses without any any RPC activity. struct PersistentChannel { target: Url, options: ChannelOptions, @@ -217,7 +249,7 @@ impl PersistentChannel { // are not in ChannelOptions. fn new( target: &str, - credentials: Option>, + _credentials: Option>, runtime: Arc, options: ChannelOptions, ) -> Self { @@ -260,7 +292,7 @@ impl ActiveChannel { let resolver_helper = Box::new(tx.clone()); // TODO(arjan-bal): Return error here instead of panicking. - let rb = GLOBAL_RESOLVER_REGISTRY.get(target.scheme()).unwrap(); + let rb = global_registry().get(target.scheme()).unwrap(); let target = name_resolution::Target::from(target); let authority = target.authority_host_port(); let authority = if authority.is_empty() { @@ -507,9 +539,12 @@ impl GracefulSwitchBalancer { // TODO: config should come from ServiceConfig. let builder = self.policy_builder.lock().unwrap(); - let config = match builder.as_ref().unwrap().parse_config(&ParsedJsonLbConfig( - json!({"shuffleAddressList": true, "unknown_field": false}), - )) { + let config = match builder + .as_ref() + .unwrap() + .parse_config(&ParsedJsonLbConfig::from_value( + json!({"shuffleAddressList": true, "unknown_field": false}), + )) { Ok(cfg) => cfg, Err(e) => { return Err(e); diff --git a/grpc/src/client/load_balancing/child_manager.rs b/grpc/src/client/load_balancing/child_manager.rs index bc7c165db..0d4af6542 100644 --- a/grpc/src/client/load_balancing/child_manager.rs +++ b/grpc/src/client/load_balancing/child_manager.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ diff --git a/grpc/src/client/load_balancing/mod.rs b/grpc/src/client/load_balancing/mod.rs index fa9e32767..01285501f 100644 --- a/grpc/src/client/load_balancing/mod.rs +++ b/grpc/src/client/load_balancing/mod.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ @@ -51,9 +57,9 @@ pub mod pick_first; #[cfg(test)] pub mod test_utils; -mod registry; +pub(crate) mod registry; use super::{service_config::LbConfig, subchannel::SubchannelStateWatcher}; -pub use registry::{LbPolicyRegistry, GLOBAL_LB_REGISTRY}; +pub(crate) use registry::{LbPolicyRegistry, GLOBAL_LB_REGISTRY}; /// A collection of data configured on the channel that is constructing this /// LbPolicy. 
@@ -73,17 +79,36 @@ pub trait WorkScheduler: Send + Sync { fn schedule_work(&self); } -// Abstract representation of the configuration for any LB policy, stored as -// JSON. Hides internal storage details and includes a method to deserialize -// the JSON into a concrete policy struct. +/// Abstract representation of the configuration for any LB policy, stored as +/// JSON. Hides internal storage details and includes a method to deserialize +/// the JSON into a concrete policy struct. #[derive(Debug)] -pub struct ParsedJsonLbConfig(pub serde_json::Value); +pub struct ParsedJsonLbConfig { + value: serde_json::Value, +} impl ParsedJsonLbConfig { + /// Creates a new ParsedJsonLbConfig from the provided JSON string. + pub fn new(json: &str) -> Result { + match serde_json::from_str(json) { + Ok(value) => Ok(ParsedJsonLbConfig { value }), + Err(e) => Err(format!("failed to parse LB config JSON: {}", e)), + } + } + + pub(crate) fn from_value(value: serde_json::Value) -> Self { + Self { value } + } + + /// Converts the JSON configuration into a concrete type that represents the + /// configuration of an LB policy. + /// + /// This will typically be used by the LB policy builder to parse the + /// configuration into a type that can be used by the LB policy. pub fn convert_to( &self, ) -> Result> { - let res: T = match serde_json::from_value(self.0.clone()) { + let res: T = match serde_json::from_value(self.value.clone()) { Ok(v) => v, Err(e) => { return Err(format!("{}", e).into()); @@ -95,7 +120,7 @@ impl ParsedJsonLbConfig { /// An LB policy factory that produces LbPolicy instances used by the channel /// to manage connections and pick connections for RPCs. -pub trait LbPolicyBuilder: Send + Sync { +pub(crate) trait LbPolicyBuilder: Send + Sync { /// Builds and returns a new LB policy instance. /// /// Note that build must not fail. 
Any optional configuration is delivered @@ -114,7 +139,7 @@ pub trait LbPolicyBuilder: Send + Sync { /// default implementation returns Ok(None). fn parse_config( &self, - config: &ParsedJsonLbConfig, + _config: &ParsedJsonLbConfig, ) -> Result, Box> { Ok(None) } @@ -494,7 +519,7 @@ impl Drop for ExternalSubchannel { let isc = self.isc.take(); let _ = self.work_scheduler.send(WorkQueueItem::Closure(Box::new( move |c: &mut InternalChannelController| { - println!("unregistering connectivity state watcher for {}", address); + println!("unregistering connectivity state watcher for {:?}", address); isc.as_ref() .unwrap() .unregister_connectivity_state_watcher(watcher.unwrap()); diff --git a/grpc/src/client/load_balancing/pick_first.rs b/grpc/src/client/load_balancing/pick_first.rs index a3af4d7d9..f145ebaf0 100644 --- a/grpc/src/client/load_balancing/pick_first.rs +++ b/grpc/src/client/load_balancing/pick_first.rs @@ -10,9 +10,9 @@ use std::{ use crate::{ client::{ load_balancing::{ - ChannelController, Failing, LbPolicy, LbPolicyBuilder, LbPolicyOptions, LbState, - ParsedJsonLbConfig, Pick, PickResult, Picker, QueuingPicker, Subchannel, - ExternalSubchannel, SubchannelState, WorkScheduler, + ChannelController, ExternalSubchannel, Failing, LbPolicy, LbPolicyBuilder, + LbPolicyOptions, LbState, ParsedJsonLbConfig, Pick, PickResult, Picker, QueuingPicker, + Subchannel, SubchannelState, WorkScheduler, }, name_resolution::{Address, Endpoint, ResolverUpdate}, service_config::LbConfig, @@ -22,7 +22,7 @@ use crate::{ }; use once_cell::sync::Lazy; -use rand::{self, rngs::StdRng, seq::SliceRandom, thread_rng, Rng, RngCore, SeedableRng}; +use rand::{self, rng, rngs::StdRng, seq::SliceRandom, Rng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; use serde_json::json; use tokio::time::sleep; @@ -32,14 +32,14 @@ type EndpointShuffler = dyn Fn(&mut [Endpoint]) + Send + Sync + 'static; pub static SHUFFLE_ENDPOINTS_FN: LazyLock>> = std::sync::LazyLock::new(|| { let 
shuffle_endpoints: Box = Box::new(|endpoints: &mut [Endpoint]| { - let mut rng = thread_rng(); + let mut rng = rng(); endpoints.shuffle(&mut rng); }); Mutex::new(shuffle_endpoints) }); pub(crate) fn thread_rng_shuffler() -> Box { Box::new(|endpoints: &mut [Endpoint]| { - let mut rng = thread_rng(); + let mut rng = rng(); endpoints.shuffle(&mut rng); }) } @@ -225,7 +225,7 @@ impl LbPolicy for PickFirstPolicy { } fn shuffle_endpoints(endpoints: &mut [Endpoint]) { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); endpoints.shuffle(&mut rng); } diff --git a/grpc/src/client/load_balancing/pick_first/tests.rs b/grpc/src/client/load_balancing/pick_first/tests.rs index 907e9c336..a330cb501 100644 --- a/grpc/src/client/load_balancing/pick_first/tests.rs +++ b/grpc/src/client/load_balancing/pick_first/tests.rs @@ -58,19 +58,21 @@ fn pickfirst_builder_parse_config_failure() -> Result<(), String> { } let test_cases = vec![ TestCase { - config: ParsedJsonLbConfig(json!({})), + config: ParsedJsonLbConfig::from_value(json!({})), want_shuffle_addresses: None, }, TestCase { - config: ParsedJsonLbConfig(json!({"shuffleAddressList": false})), + config: ParsedJsonLbConfig::from_value(json!({"shuffleAddressList": false})), want_shuffle_addresses: Some(false), }, TestCase { - config: ParsedJsonLbConfig(json!({"shuffleAddressList": true})), + config: ParsedJsonLbConfig::from_value(json!({"shuffleAddressList": true})), want_shuffle_addresses: Some(true), }, TestCase { - config: ParsedJsonLbConfig(json!({"shuffleAddressList": true, "unknownField": "foo"})), + config: ParsedJsonLbConfig::from_value( + json!({"shuffleAddressList": true, "unknownField": "foo"}), + ), want_shuffle_addresses: Some(true), }, ]; @@ -137,7 +139,7 @@ fn setup() -> ( fn create_endpoint_with_one_address(addr: String) -> Endpoint { Endpoint { addresses: vec![Address { - address: addr, + address: addr.into(), ..Default::default() }], ..Default::default() @@ -149,7 +151,7 @@ fn 
create_endpoint_with_n_addresses(n: usize) -> Endpoint { let mut addresses = Vec::new(); for i in 0..n { addresses.push(Address { - address: format!("{}.{}.{}.{}:{}", i, i, i, i, i), + address: format!("{}.{}.{}.{}:{}", i, i, i, i, i).into(), ..Default::default() }); } @@ -184,7 +186,7 @@ fn send_resolver_update_with_lb_config_to_policy( ..Default::default() }; - let json_config = ParsedJsonLbConfig(json!({"shuffleAddressList": true})); + let json_config = ParsedJsonLbConfig::from_value(json!({"shuffleAddressList": true})); let builder = GLOBAL_LB_REGISTRY.get_policy("pick_first").unwrap(); let config = builder.parse_config(&json_config).unwrap(); @@ -738,15 +740,15 @@ async fn pickfirst_with_multiple_backends_duplicate_addresses() { let endpoint = Endpoint { addresses: vec![ Address { - address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0), + address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0).into(), ..Default::default() }, Address { - address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0), + address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0).into(), ..Default::default() }, Address { - address: format!("{}.{}.{}.{}:{}", 1, 1, 1, 1, 1), + address: format!("{}.{}.{}.{}:{}", 1, 1, 1, 1, 1).into(), ..Default::default() }, ], @@ -755,11 +757,11 @@ async fn pickfirst_with_multiple_backends_duplicate_addresses() { let endpoint_with_duplicates_removed = Endpoint { addresses: vec![ Address { - address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0), + address: format!("{}.{}.{}.{}:{}", 0, 0, 0, 0, 0).into(), ..Default::default() }, Address { - address: format!("{}.{}.{}.{}:{}", 1, 1, 1, 1, 1), + address: format!("{}.{}.{}.{}:{}", 1, 1, 1, 1, 1).into(), ..Default::default() }, ], diff --git a/grpc/src/client/load_balancing/registry.rs b/grpc/src/client/load_balancing/registry.rs index e21bd078e..de7a575d5 100644 --- a/grpc/src/client/load_balancing/registry.rs +++ b/grpc/src/client/load_balancing/registry.rs @@ -19,14 +19,14 @@ impl LbPolicyRegistry { Self { m: Arc::default() } } /// Add a 
LB policy into the registry. - pub fn add_builder(&self, builder: impl LbPolicyBuilder + 'static) { + pub(crate) fn add_builder(&self, builder: impl LbPolicyBuilder + 'static) { self.m .lock() .unwrap() .insert(builder.name().to_string(), Arc::new(builder)); } /// Retrieve a LB policy from the registry, or None if not found. - pub fn get_policy(&self, name: &str) -> Option> { + pub(crate) fn get_policy(&self, name: &str) -> Option> { self.m.lock().unwrap().get(name).cloned() } } diff --git a/grpc/src/client/load_balancing/test_utils.rs b/grpc/src/client/load_balancing/test_utils.rs index e9ab5c9b3..bae7216f0 100644 --- a/grpc/src/client/load_balancing/test_utils.rs +++ b/grpc/src/client/load_balancing/test_utils.rs @@ -83,7 +83,7 @@ impl Display for TestEvent { Self::NewSubchannel(addr, _) => write!(f, "NewSubchannel({})", addr), Self::UpdatePicker(state) => write!(f, "UpdatePicker({})", state.connectivity_state), Self::RequestResolution => write!(f, "RequestResolution"), - Self::Connect(addr) => write!(f, "Connect({})", addr.address), + Self::Connect(addr) => write!(f, "Connect({})", addr.address.to_string()), Self::ScheduleWork => write!(f, "ScheduleWork"), } } diff --git a/grpc/src/client/mod.rs b/grpc/src/client/mod.rs index a778986dc..66c809e62 100644 --- a/grpc/src/client/mod.rs +++ b/grpc/src/client/mod.rs @@ -2,28 +2,34 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
* */ use std::fmt::Display; -pub mod load_balancing; +pub mod channel; +pub(crate) mod load_balancing; pub(crate) mod name_resolution; pub mod service_config; pub mod transport; -mod channel; mod subchannel; pub use channel::Channel; pub use channel::ChannelOptions; diff --git a/grpc/src/client/name_resolution/backoff.rs b/grpc/src/client/name_resolution/backoff.rs index 1199f5b19..6507ffc7e 100644 --- a/grpc/src/client/name_resolution/backoff.rs +++ b/grpc/src/client/name_resolution/backoff.rs @@ -1,7 +1,30 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + use rand::Rng; -use std::{sync::Mutex, time::Duration}; +use std::time::Duration; -/// TODO(arjan-bal): Move this #[derive(Clone)] pub struct BackoffConfig { /// The amount of time to backoff after the first failure. @@ -23,7 +46,7 @@ pub struct ExponentialBackoff { /// The delay for the next retry, without the random jitter. 
Store as f64 /// to avoid rounding errors. - next_delay_secs: Mutex, + next_delay_secs: f64, } /// This is a backoff configuration with the default values specified @@ -38,37 +61,51 @@ pub const DEFAULT_EXPONENTIAL_CONFIG: BackoffConfig = BackoffConfig { max_delay: Duration::from_secs(120), }; -impl ExponentialBackoff { - pub fn new(mut config: BackoffConfig) -> Self { - // Adjust params to get them in valid ranges. +impl BackoffConfig { + fn validate(&self) -> Result<(), &'static str> { + // Check that the arguments are in valid ranges. // 0 <= base_dealy <= max_delay - config.base_delay = config.base_delay.min(config.max_delay); + if self.base_delay > self.max_delay { + Err("base_delay must be greater than max_delay")?; + } // 1 <= multiplier - config.multiplier = config.multiplier.max(1.0); + if self.multiplier < 1.0 { + Err("multiplier must be greater than 1.0")?; + } // 0 <= jitter <= 1 - config.jitter = config.jitter.max(0.0); - config.jitter = config.jitter.min(1.0); + if self.jitter < 0.0 { + Err("jitter must be greater than or equal to 0")?; + } + if self.jitter > 1.0 { + Err("jitter must be less than or equal to 1")? 
+ } + Ok(()) + } +} + +impl ExponentialBackoff { + pub fn new(config: BackoffConfig) -> Result { + config.validate()?; let next_delay_secs = config.base_delay.as_secs_f64(); - ExponentialBackoff { + Ok(ExponentialBackoff { config, - next_delay_secs: Mutex::new(next_delay_secs), - } + next_delay_secs, + }) } - pub fn reset(&self) { - let mut next_delay = self.next_delay_secs.lock().unwrap(); - *next_delay = self.config.base_delay.as_secs_f64(); + pub fn reset(&mut self) { + self.next_delay_secs = self.config.base_delay.as_secs_f64(); } - pub fn backoff_duration(&self) -> Duration { - let mut next_delay = self.next_delay_secs.lock().unwrap(); + pub fn backoff_duration(&mut self) -> Duration { + let next_delay = self.next_delay_secs; let cur_delay = - *next_delay * (1.0 + self.config.jitter * rand::thread_rng().gen_range(-1.0..1.0)); - *next_delay = self + next_delay * (1.0 + self.config.jitter * rand::rng().random_range(-1.0..1.0)); + self.next_delay_secs = self .config .max_delay .as_secs_f64() - .min(*next_delay * self.config.multiplier); + .min(next_delay * self.config.multiplier); Duration::from_secs_f64(cur_delay) } } @@ -77,12 +114,20 @@ impl ExponentialBackoff { mod tests { use std::time::Duration; - use crate::client::name_resolution::backoff::{BackoffConfig, ExponentialBackoff}; + use crate::client::name_resolution::backoff::{ + BackoffConfig, ExponentialBackoff, DEFAULT_EXPONENTIAL_CONFIG, + }; // Epsilon for floating point comparisons if needed, though Duration // comparisons are often better. 
const EPSILON: f64 = 1e-9; + #[test] + fn default_config_is_valid() { + let result = ExponentialBackoff::new(DEFAULT_EXPONENTIAL_CONFIG.clone()); + assert_eq!(result.is_ok(), true); + } + #[test] fn base_less_than_max() { let config = BackoffConfig { @@ -91,7 +136,7 @@ mod tests { jitter: 0.0, max_delay: Duration::from_secs(100), }; - let backoff = ExponentialBackoff::new(config.clone()); + let mut backoff = ExponentialBackoff::new(config).unwrap(); assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); } @@ -103,8 +148,8 @@ mod tests { base_delay: Duration::from_secs(100), max_delay: Duration::from_secs(10), }; - let backoff = ExponentialBackoff::new(config.clone()); - assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); + let result = ExponentialBackoff::new(config); + assert_eq!(result.is_err(), true); } #[test] @@ -115,10 +160,8 @@ mod tests { base_delay: Duration::from_secs(10), max_delay: Duration::from_secs(100), }; - let backoff = ExponentialBackoff::new(config.clone()); - // multiplier gets clipped to 1. - assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); - assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); + let result = ExponentialBackoff::new(config); + assert_eq!(result.is_err(), true); } #[test] @@ -129,10 +172,8 @@ mod tests { base_delay: Duration::from_secs(10), max_delay: Duration::from_secs(100), }; - let backoff = ExponentialBackoff::new(config.clone()); - // jitter gets clipped to 0. - assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); - assert_eq!(backoff.backoff_duration(), Duration::from_secs(10)); + let result = ExponentialBackoff::new(config); + assert_eq!(result.is_err(), true); } #[test] @@ -143,16 +184,8 @@ mod tests { base_delay: Duration::from_secs(10), max_delay: Duration::from_secs(100), }; - let backoff = ExponentialBackoff::new(config.clone()); - // jitter gets clipped to 1. - // 0 <= duration <= 20. 
- let duration = backoff.backoff_duration(); - assert!(duration.lt(&Duration::from_secs(20))); - assert!(duration.gt(&Duration::from_secs(0))); - - let duration = backoff.backoff_duration(); - assert!(duration.lt(&Duration::from_secs(20))); - assert!(duration.gt(&Duration::from_secs(0))); + let result = ExponentialBackoff::new(config); + assert_eq!(result.is_err(), true); } #[test] @@ -163,7 +196,7 @@ mod tests { base_delay: Duration::from_secs(1), max_delay: Duration::from_secs(15), }; - let backoff = ExponentialBackoff::new(config.clone()); + let mut backoff = ExponentialBackoff::new(config.clone()).unwrap(); assert_eq!(backoff.backoff_duration(), Duration::from_secs(1)); assert_eq!(backoff.backoff_duration(), Duration::from_secs(2)); assert_eq!(backoff.backoff_duration(), Duration::from_secs(4)); @@ -191,18 +224,18 @@ mod tests { base_delay: Duration::from_secs(1), max_delay: Duration::from_secs(15), }; - let backoff = ExponentialBackoff::new(config.clone()); + let mut backoff = ExponentialBackoff::new(config.clone()).unwrap(); // 0.8 <= duration <= 1.2. let duration = backoff.backoff_duration(); - assert!(duration.gt(&Duration::from_secs_f64(0.8 - EPSILON))); - assert!(duration.lt(&Duration::from_secs_f64(1.2 + EPSILON))); + assert_eq!(duration.gt(&Duration::from_secs_f64(0.8 - EPSILON)), true); + assert_eq!(duration.lt(&Duration::from_secs_f64(1.2 + EPSILON)), true); // 1.6 <= duration <= 2.4. let duration = backoff.backoff_duration(); - assert!(duration.gt(&Duration::from_secs_f64(1.6 - EPSILON))); - assert!(duration.lt(&Duration::from_secs_f64(2.4 + EPSILON))); + assert_eq!(duration.gt(&Duration::from_secs_f64(1.6 - EPSILON)), true); + assert_eq!(duration.lt(&Duration::from_secs_f64(2.4 + EPSILON)), true); // 3.2 <= duration <= 4.8. 
let duration = backoff.backoff_duration(); - assert!(duration.gt(&Duration::from_secs_f64(3.2 - EPSILON))); - assert!(duration.lt(&Duration::from_secs_f64(4.8 + EPSILON))); + assert_eq!(duration.gt(&Duration::from_secs_f64(3.2 - EPSILON)), true); + assert_eq!(duration.lt(&Duration::from_secs_f64(4.8 + EPSILON)), true); } } diff --git a/grpc/src/client/name_resolution/dns/mod.rs b/grpc/src/client/name_resolution/dns/mod.rs index 14843ee1d..5c05127fd 100644 --- a/grpc/src/client/name_resolution/dns/mod.rs +++ b/grpc/src/client/name_resolution/dns/mod.rs @@ -1,27 +1,52 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + //! This module implements a DNS resolver to be installed as the default resolver //! in grpc. 
use std::{ net::{IpAddr, SocketAddr}, - sync::{Arc, Mutex}, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, time::{Duration, SystemTime}, }; -use once_cell::sync::Lazy; -use tokio::sync::mpsc::UnboundedSender; +use parking_lot::Mutex; +use tokio::sync::Notify; use url::Host; use crate::{ - client::name_resolution::{ - passthrough::{self, NopResolver}, - Address, ResolverUpdate, TCP_IP_NETWORK_TYPE, - }, - rt, + byte_str::ByteStr, + client::name_resolution::{global_registry, ChannelController, ResolverBuilder, Target}, + rt::{self, TaskHandle}, }; use super::{ - backoff::{self, BackoffConfig, ExponentialBackoff, DEFAULT_EXPONENTIAL_CONFIG}, - Endpoint, Resolver, ResolverBuilder, GLOBAL_RESOLVER_REGISTRY, + backoff::{BackoffConfig, ExponentialBackoff, DEFAULT_EXPONENTIAL_CONFIG}, + Address, Endpoint, NopResolver, Resolver, ResolverOptions, ResolverUpdate, TCP_IP_NETWORK_TYPE, }; #[cfg(test)] @@ -36,31 +61,47 @@ const DEFAULT_DNS_PORT: u16 = 53; /// /// It is recommended to set this value at application startup. Avoid modifying /// this variable after initialization. -static RESOLVING_TIMEOUT: Lazy> = Lazy::new(|| Mutex::new(Duration::from_secs(30))); +static RESOLVING_TIMEOUT_MS: AtomicU64 = AtomicU64::new(30_000); // 30 seconds /// This is the minimum interval at which re-resolutions are allowed. This helps /// to prevent excessive re-resolution. -static MIN_RESOLUTION_INTERVAL: Lazy> = - Lazy::new(|| Mutex::new(Duration::from_secs(30))); +static MIN_RESOLUTION_INTERVAL_MS: AtomicU64 = AtomicU64::new(30_000); // 30 seconds -pub fn get_resolving_timeout() -> Duration { - *RESOLVING_TIMEOUT.lock().unwrap() +fn get_resolving_timeout() -> Duration { + Duration::from_millis(RESOLVING_TIMEOUT_MS.load(Ordering::Relaxed)) } +/// Sets the maximum duration for DNS resolution requests. +/// +/// This function affects the global timeout used by all channels using the DNS +/// name resolver scheme. 
+/// +/// It must be called only at application startup, before any gRPC calls are +/// made. +/// +/// The default value is 30 seconds. Setting the timeout too low may result in +/// premature timeouts during resolution, while setting it too high may lead to +/// unnecessary delays in service discovery. Choose a value appropriate for your +/// specific needs and network environment. pub fn set_resolving_timeout(duration: Duration) { - *RESOLVING_TIMEOUT.lock().unwrap() = duration; + RESOLVING_TIMEOUT_MS.store(duration.as_millis() as u64, Ordering::Relaxed); } -pub fn get_min_resolution_interval() -> Duration { - *MIN_RESOLUTION_INTERVAL.lock().unwrap() +fn get_min_resolution_interval() -> Duration { + Duration::from_millis(MIN_RESOLUTION_INTERVAL_MS.load(Ordering::Relaxed)) } +/// Sets the default minimum interval at which DNS re-resolutions are allowed. +/// This helps to prevent excessive re-resolution. +/// +/// It must be called only at application startup, before any gRPC calls are +/// made. 
pub fn set_min_resolution_interval(duration: Duration) { - *MIN_RESOLUTION_INTERVAL.lock().unwrap() = duration; + MIN_RESOLUTION_INTERVAL_MS.store(duration.as_millis() as u64, Ordering::Relaxed); } pub fn reg() { - GLOBAL_RESOLVER_REGISTRY.add_builder(Box::new(Builder {})); + global_registry().add_builder(Box::new(Builder {})); } struct Builder {} @@ -69,58 +110,42 @@ struct DnsOptions { min_resolution_interval: Duration, resolving_timeout: Duration, backoff_config: BackoffConfig, + host: String, + port: u16, } impl DnsResolver { fn new( - target: &super::Target, - options: super::ResolverOptions, + dns_client: Box, + options: ResolverOptions, dns_opts: DnsOptions, - ) -> Box { - let parsed = match parse_endpoint_and_authority(target) { - Ok(res) => res, - Err(err) => return nop_resolver_for_err(err.to_string(), options), - }; - let endpoint = parsed.endpoint; - let host = match endpoint.host { - Host::Domain(d) => d, - Host::Ipv4(ipv4) => { - return nop_resolver_for_ip(IpAddr::V4(ipv4), endpoint.port, options) - } - Host::Ipv6(ipv6) => { - return nop_resolver_for_ip(IpAddr::V6(ipv6), endpoint.port, options) - } - }; - let authority = parsed.authority; - let dns = match options.runtime.get_dns_resolver(rt::ResolverOptions { - server_addr: authority, - }) { - Ok(dns) => dns, - Err(err) => return nop_resolver_for_err(err.to_string(), options), - }; + ) -> Self { let state = Arc::new(Mutex::new(InternalState { addrs: Ok(Vec::new()), + channel_response: None, })); let state_copy = state.clone(); - let (resolve_now_tx, mut resolve_now_rx) = tokio::sync::mpsc::unbounded_channel::<()>(); - let (update_error_tx, update_error_rx) = - tokio::sync::mpsc::unbounded_channel::>(); - - let handle = options.runtime.clone().spawn(Box::pin(async move { - let backoff = ExponentialBackoff::new(dns_opts.backoff_config.clone()); + let resolve_now_notify = Arc::new(Notify::new()); + let channel_updated_notify = Arc::new(Notify::new()); + let channel_updated_rx = 
channel_updated_notify.clone(); + let resolve_now_rx = resolve_now_notify.clone(); + + let runtime = options.runtime.clone(); + let work_scheduler = options.work_scheduler.clone(); + let handle = options.runtime.spawn(Box::pin(async move { + let mut backoff = ExponentialBackoff::new(dns_opts.backoff_config.clone()) + .expect("default exponential config must be valid"); let state = state_copy; - let work_scheduler = options.work_scheduler; - let mut update_error_rx = update_error_rx; loop { - let mut lookup_fut = dns.lookup_host_name(&host); - let mut timeout_fut = options.runtime.sleep(dns_opts.resolving_timeout); + let mut lookup_fut = dns_client.lookup_host_name(&dns_opts.host); + let mut timeout_fut = runtime.sleep(dns_opts.resolving_timeout); let addrs = tokio::select! { result = &mut lookup_fut => { match result { Ok(ips) => { let addrs = ips .into_iter() - .map(|ip| SocketAddr::new(ip, endpoint.port)) + .map(|ip| SocketAddr::new(ip, dns_opts.port)) .collect(); Ok(addrs) } @@ -132,68 +157,81 @@ impl DnsResolver { } }; { - let mut internal_state = match state.lock() { - Ok(state) => state, - Err(_) => return, - }; - internal_state.addrs = addrs; + state.lock().addrs = addrs; } work_scheduler.schedule_work(); - let update_result = match update_error_rx.recv().await { - Some(res) => res, - None => return, - }; - let next_resoltion_time: SystemTime; - if update_result.is_err() { - next_resoltion_time = SystemTime::now() + channel_updated_rx.notified().await; + let channel_response = { state.lock().channel_response.take() }; + let next_resoltion_time = if let Some(_) = channel_response { + SystemTime::now() .checked_add(backoff.backoff_duration()) - .unwrap(); + .unwrap() } else { // Success resolving, wait for the next resolve_now. However, // also wait MIN_RESOLUTION_INTERVAL at the very least to prevent // constantly re-resolving. 
backoff.reset(); - next_resoltion_time = SystemTime::now() + let res_time = SystemTime::now() .checked_add(dns_opts.min_resolution_interval) .unwrap(); - _ = resolve_now_rx.recv().await; - } + _ = resolve_now_rx.notified().await; + res_time + }; // Wait till next resolution time. - match next_resoltion_time.duration_since(SystemTime::now()) { - Ok(d) => options.runtime.sleep(d).await, - Err(_) => continue, // Time has already passed. + let Ok(duration) = next_resoltion_time.duration_since(SystemTime::now()) else { + continue; // Time has already passed. }; + runtime.sleep(duration).await; } })); - Box::new(DnsResolver { + Self { state, task_handle: handle, - resolve_now_requester: resolve_now_tx, - update_error_sender: update_error_tx, - }) + resolve_now_notifier: resolve_now_notify, + channel_update_notifier: channel_updated_notify, + } } } impl ResolverBuilder for Builder { - fn build( - &self, - target: &super::Target, - options: super::ResolverOptions, - ) -> Box { + fn build(&self, target: &Target, options: ResolverOptions) -> Box { + let parsed = match parse_endpoint_and_authority(target) { + Ok(res) => res, + Err(err) => return nop_resolver_for_err(err.to_string(), options), + }; + let endpoint = parsed.endpoint; + let host = match endpoint.host { + Host::Domain(d) => d, + Host::Ipv4(ipv4) => { + return nop_resolver_for_ip(IpAddr::V4(ipv4), endpoint.port, options) + } + Host::Ipv6(ipv6) => { + return nop_resolver_for_ip(IpAddr::V6(ipv6), endpoint.port, options) + } + }; + let authority = parsed.authority; + let dns_client = match options.runtime.get_dns_resolver(rt::ResolverOptions { + server_addr: authority, + }) { + Ok(dns) => dns, + Err(err) => return nop_resolver_for_err(err.to_string(), options), + }; let dns_opts = DnsOptions { min_resolution_interval: get_min_resolution_interval(), resolving_timeout: get_resolving_timeout(), backoff_config: DEFAULT_EXPONENTIAL_CONFIG, + host, + port: endpoint.port, }; - DnsResolver::new(target, options, dns_opts) + 
Box::new(DnsResolver::new(dns_client, options, dns_opts)) } - fn scheme(&self) -> &str { + fn scheme(&self) -> &'static str { "dns" } - fn is_valid_uri(&self, target: &super::Target) -> bool { + fn is_valid_uri(&self, target: &Target) -> bool { if let Err(err) = parse_endpoint_and_authority(target) { eprintln!("{}", err); false @@ -205,28 +243,24 @@ impl ResolverBuilder for Builder { struct DnsResolver { state: Arc>, - task_handle: Box, - resolve_now_requester: UnboundedSender<()>, - update_error_sender: UnboundedSender>, + task_handle: Box, + resolve_now_notifier: Arc, + channel_update_notifier: Arc, } struct InternalState { addrs: Result, String>, + // Error from the latest call to channel_controller.update(). + channel_response: Option, } impl Resolver for DnsResolver { fn resolve_now(&mut self) { - _ = self.resolve_now_requester.send(()); + self.resolve_now_notifier.notify_one(); } - fn work(&mut self, channel_controller: &mut dyn super::ChannelController) { - let state = match self.state.lock() { - Err(_) => { - eprintln!("DNS resolver mutex poisoned, can't update channel"); - return; - } - Ok(s) => s, - }; + fn work(&mut self, channel_controller: &mut dyn ChannelController) { + let mut state = self.state.lock(); let endpoint_result = match &state.addrs { Ok(addrs) => { let endpoints: Vec<_> = addrs @@ -234,7 +268,7 @@ impl Resolver for DnsResolver { .map(|a| Endpoint { addresses: vec![Address { network_type: TCP_IP_NETWORK_TYPE, - address: a.to_string(), + address: ByteStr::from(a.to_string()), ..Default::default() }], ..Default::default() @@ -249,7 +283,8 @@ impl Resolver for DnsResolver { ..Default::default() }; let status = channel_controller.update(update); - _ = self.update_error_sender.send(status); + state.channel_response = status.err(); + self.channel_update_notifier.notify_one(); } } @@ -271,7 +306,7 @@ struct ParseResult { authority: Option, } -fn parse_endpoint_and_authority(target: &super::Target) -> Result { +fn 
parse_endpoint_and_authority(target: &Target) -> Result { // Parse the endpoint. let endpoint = target.path(); let endpoint = endpoint.strip_prefix("/").unwrap_or(endpoint); @@ -311,7 +346,7 @@ fn parse_endpoint_and_authority(target: &super::Target) -> Result is returned. +/// Ok(None) is returned. fn parse_host_port(host_and_port: &str, default_port: u16) -> Result, String> { // We need to use the https scheme otherwise url::Url::parse doesn't convert // IP addresses to Host::Ipv4 or Host::Ipv6 if they could represent valid @@ -332,18 +367,14 @@ fn parse_host_port(host_and_port: &str, default_port: u16) -> Result Box { +fn nop_resolver_for_ip(ip: IpAddr, port: u16, options: ResolverOptions) -> Box { options.work_scheduler.schedule_work(); Box::new(NopResolver { update: ResolverUpdate { endpoints: Ok(vec![Endpoint { addresses: vec![Address { network_type: TCP_IP_NETWORK_TYPE, - address: SocketAddr::new(ip, port).to_string(), + address: ByteStr::from(SocketAddr::new(ip, port).to_string()), ..Default::default() }], ..Default::default() @@ -353,7 +384,7 @@ fn nop_resolver_for_ip( }) } -fn nop_resolver_for_err(err: String, options: super::ResolverOptions) -> Box { +fn nop_resolver_for_err(err: String, options: ResolverOptions) -> Box { options.work_scheduler.schedule_work(); Box::new(NopResolver { update: ResolverUpdate { diff --git a/grpc/src/client/name_resolution/dns/test.rs b/grpc/src/client/name_resolution/dns/test.rs index bc84ac712..d0ea96ed8 100644 --- a/grpc/src/client/name_resolution/dns/test.rs +++ b/grpc/src/client/name_resolution/dns/test.rs @@ -1,18 +1,49 @@ +/* + * + * Copyright 2025 gRPC authors. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + use std::{future::Future, pin::Pin, sync::Arc, time::Duration}; -use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; +use tokio::sync::mpsc::{self, UnboundedSender}; +use url::Host; use crate::{ - client::name_resolution::{ - self, - backoff::{BackoffConfig, DEFAULT_EXPONENTIAL_CONFIG}, - dns::{self, parse_endpoint_and_authority, HostPort}, - ResolverOptions, ResolverUpdate, Target, GLOBAL_RESOLVER_REGISTRY, + client::{ + name_resolution::{ + backoff::{BackoffConfig, DEFAULT_EXPONENTIAL_CONFIG}, + dns::{ + get_min_resolution_interval, get_resolving_timeout, parse_endpoint_and_authority, + reg, DnsResolver, HostPort, + }, + global_registry, ChannelController, Resolver, ResolverOptions, ResolverUpdate, Target, + WorkScheduler, + }, + service_config::ServiceConfig, }, rt::{self, tokio::TokioRuntime}, }; -use super::ParseResult; +use super::{DnsOptions, ParseResult}; const DEFAULT_TEST_SHORT_TIMEOUT: Duration = Duration::from_millis(10); @@ -27,7 +58,7 @@ pub fn target_parsing() { input: "dns:///grpc.io", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), port: 443, }, authority: None, @@ -37,7 +68,7 @@ pub fn target_parsing() { input: "dns:///grpc.io:1234", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), port: 1234, }, authority: None, @@ -47,7 +78,7 @@ pub fn target_parsing() { input: "dns://8.8.8.8/grpc.io:1234", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), port: 1234, }, authority: Some("8.8.8.8:53".parse().unwrap()), @@ -57,7 +88,7 @@ pub fn target_parsing() { input: "dns://8.8.8.8:5678/grpc.io:1234/abc", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), 
port: 1234, }, authority: Some("8.8.8.8:5678".parse().unwrap()), @@ -67,7 +98,7 @@ pub fn target_parsing() { input: "dns://[::1]:5678/grpc.io:1234/abc", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), port: 1234, }, authority: Some("[::1]:5678".parse().unwrap()), @@ -77,7 +108,7 @@ pub fn target_parsing() { input: "dns://[fe80::1]:5678/127.0.0.1:1234/abc", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Ipv4("127.0.0.1".parse().unwrap()), + host: Host::Ipv4("127.0.0.1".parse().unwrap()), port: 1234, }, authority: Some("[fe80::1]:5678".parse().unwrap()), @@ -99,7 +130,7 @@ pub fn target_parsing() { input: "dns:///grpc.io:/", want_result: Ok(ParseResult { endpoint: HostPort { - host: url::Host::Domain("grpc.io".to_string()), + host: Host::Domain("grpc.io".to_string()), port: 443, }, authority: None, @@ -132,11 +163,11 @@ pub fn target_parsing() { } } -struct WorkScheduler { +struct FakeWorkScheduler { work_tx: UnboundedSender<()>, } -impl name_resolution::WorkScheduler for WorkScheduler { +impl WorkScheduler for FakeWorkScheduler { fn schedule_work(&self) { self.work_tx.send(()).unwrap(); } @@ -147,28 +178,25 @@ struct FakeChannelController { update_tx: UnboundedSender, } -impl name_resolution::ChannelController for FakeChannelController { - fn update(&mut self, update: name_resolution::ResolverUpdate) -> Result<(), String> { +impl ChannelController for FakeChannelController { + fn update(&mut self, update: ResolverUpdate) -> Result<(), String> { println!("Received resolver update: {:?}", &update); self.update_tx.send(update).unwrap(); self.update_result.clone() } - fn parse_service_config( - &self, - config: &str, - ) -> Result { + fn parse_service_config(&self, _: &str) -> Result { Err("Unimplemented".to_string()) } } #[tokio::test] pub async fn dns_basic() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); + reg(); + 
let builder = global_registry().get("dns").unwrap(); let target = &"dns:///localhost:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let opts = ResolverOptions { @@ -179,7 +207,7 @@ pub async fn dns_basic() { let mut resolver = builder.build(target, opts); // Wait for schedule work to be called. - work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -188,16 +216,16 @@ pub async fn dns_basic() { resolver.work(&mut channel_controller); // A successful endpoint update should be received. let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); } #[tokio::test] pub async fn invalid_target() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); + reg(); + let builder = global_registry().get("dns").unwrap(); let target = &"dns:///:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let opts = ResolverOptions { @@ -208,7 +236,7 @@ pub async fn invalid_target() { let mut resolver = builder.build(target, opts); // Wait for schedule work to be called. - work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -217,12 +245,13 @@ pub async fn invalid_target() { resolver.work(&mut channel_controller); // An error endpoint update should be received. 
let update = update_rx.recv().await.unwrap(); - assert!( + assert_eq!( update .endpoints .err() .unwrap() - .contains(&target.to_string()) + .contains(&target.to_string()), + true ); } @@ -234,12 +263,12 @@ struct FakeDns { #[tonic::async_trait] impl rt::DnsResolver for FakeDns { - async fn lookup_host_name(&self, name: &str) -> Result, String> { + async fn lookup_host_name(&self, _: &str) -> Result, String> { tokio::time::sleep(self.latency).await; self.lookup_result.clone() } - async fn lookup_txt(&self, name: &str) -> Result, String> { + async fn lookup_txt(&self, _: &str) -> Result, String> { Err("unimplemented".to_string()) } } @@ -257,10 +286,7 @@ impl rt::Runtime for FakeRuntime { self.inner.spawn(task) } - fn get_dns_resolver( - &self, - opts: rt::ResolverOptions, - ) -> Result, String> { + fn get_dns_resolver(&self, _: rt::ResolverOptions) -> Result, String> { Ok(Box::new(self.dns.clone())) } @@ -273,17 +299,17 @@ impl rt::Runtime for FakeRuntime { target: std::net::SocketAddr, opts: rt::TcpOptions, ) -> Pin, String>> + Send>> { - unimplemented!() + panic!() } } #[tokio::test] pub async fn dns_lookup_error() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); + reg(); + let builder = global_registry().get("dns").unwrap(); let target = &"dns:///grpc.io:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let runtime = FakeRuntime { @@ -301,7 +327,7 @@ pub async fn dns_lookup_error() { let mut resolver = builder.build(target, opts); // Wait for schedule work to be called. 
- work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -310,16 +336,13 @@ pub async fn dns_lookup_error() { resolver.work(&mut channel_controller); // An error endpoint update should be received. let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.err().unwrap().contains("test_error")); + assert_eq!(update.endpoints.err().unwrap().contains("test_error"), true); } #[tokio::test] pub async fn dns_lookup_timeout() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); - let target = &"dns:///grpc.io:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let runtime = FakeRuntime { @@ -329,20 +352,23 @@ pub async fn dns_lookup_timeout() { lookup_result: Ok(Vec::new()), }, }; + let dns_client = runtime.dns.clone(); let opts = ResolverOptions { authority: "ignored".to_string(), runtime: Arc::new(runtime), work_scheduler: work_scheduler.clone(), }; - let dns_opts = super::DnsOptions { - min_resolution_interval: super::get_min_resolution_interval(), + let dns_opts = DnsOptions { + min_resolution_interval: get_min_resolution_interval(), resolving_timeout: DEFAULT_TEST_SHORT_TIMEOUT, backoff_config: DEFAULT_EXPONENTIAL_CONFIG, + host: "grpc.io".to_string(), + port: 1234, }; - let mut resolver = super::DnsResolver::new(target, opts, dns_opts); + let mut resolver = DnsResolver::new(Box::new(dns_client), opts, dns_opts); // Wait for schedule work to be called. 
- work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -352,16 +378,13 @@ pub async fn dns_lookup_timeout() { // An error endpoint update should be received. let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.err().unwrap().contains("Timed out")); + assert_eq!(update.endpoints.err().unwrap().contains("Timed out"), true); } #[tokio::test] pub async fn rate_limit() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); - let target = &"dns:///localhost:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let opts = ResolverOptions { @@ -369,15 +392,21 @@ pub async fn rate_limit() { runtime: Arc::new(TokioRuntime {}), work_scheduler: work_scheduler.clone(), }; - let dns_opts = super::DnsOptions { + let dns_client = opts + .runtime + .get_dns_resolver(rt::ResolverOptions { server_addr: None }) + .unwrap(); + let dns_opts = DnsOptions { min_resolution_interval: Duration::from_secs(20), - resolving_timeout: super::get_resolving_timeout(), + resolving_timeout: get_resolving_timeout(), backoff_config: DEFAULT_EXPONENTIAL_CONFIG, + host: "localhost".to_string(), + port: 1234, }; - let mut resolver = super::DnsResolver::new(target, opts, dns_opts); + let mut resolver = DnsResolver::new(dns_client, opts, dns_opts); // Wait for schedule work to be called. - work_rx.recv().await.unwrap(); + let event = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -386,14 +415,14 @@ pub async fn rate_limit() { resolver.work(&mut channel_controller); // A successful endpoint update should be received. 
let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); // Call resolve_now repeatedly, new updates should not be produced. - for i in 0..5 { + for _ in 0..5 { resolver.resolve_now(); tokio::select! { _ = work_rx.recv() => { - panic!("Received unexpected work request from resolver: {:?}", ()); + panic!("Received unexpected work request from resolver: {:?}", event); } _ = tokio::time::sleep(DEFAULT_TEST_SHORT_TIMEOUT) => { println!("No work requested from resolver."); @@ -404,11 +433,8 @@ pub async fn rate_limit() { #[tokio::test] pub async fn re_resolution_after_success() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); - let target = &"dns:///localhost:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let opts = ResolverOptions { @@ -416,15 +442,21 @@ pub async fn re_resolution_after_success() { runtime: Arc::new(TokioRuntime {}), work_scheduler: work_scheduler.clone(), }; - let dns_opts = super::DnsOptions { + let dns_opts = DnsOptions { min_resolution_interval: Duration::from_millis(1), - resolving_timeout: super::get_resolving_timeout(), + resolving_timeout: get_resolving_timeout(), backoff_config: DEFAULT_EXPONENTIAL_CONFIG, + host: "localhost".to_string(), + port: 1234, }; - let mut resolver = super::DnsResolver::new(target, opts, dns_opts); + let dns_client = opts + .runtime + .get_dns_resolver(rt::ResolverOptions { server_addr: None }) + .unwrap(); + let mut resolver = DnsResolver::new(dns_client, opts, dns_opts); // Wait for schedule work to be called. 
- work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { update_tx, @@ -433,23 +465,20 @@ pub async fn re_resolution_after_success() { resolver.work(&mut channel_controller); // A successful endpoint update should be received. let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); // Call resolve_now, a new update should be produced. resolver.resolve_now(); - work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); resolver.work(&mut channel_controller); let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); } #[tokio::test] pub async fn backoff_on_error() { - super::reg(); - let builder = GLOBAL_RESOLVER_REGISTRY.get("dns").unwrap(); - let target = &"dns:///localhost:1234".parse().unwrap(); let (work_tx, mut work_rx) = mpsc::unbounded_channel(); - let work_scheduler = Arc::new(WorkScheduler { + let work_scheduler = Arc::new(FakeWorkScheduler { work_tx: work_tx.clone(), }); let opts = ResolverOptions { @@ -457,9 +486,9 @@ pub async fn backoff_on_error() { runtime: Arc::new(TokioRuntime {}), work_scheduler: work_scheduler.clone(), }; - let dns_opts = super::DnsOptions { + let dns_opts = DnsOptions { min_resolution_interval: Duration::from_millis(1), - resolving_timeout: super::get_resolving_timeout(), + resolving_timeout: get_resolving_timeout(), // Speed up the backoffs to make the test run faster. 
backoff_config: BackoffConfig { base_delay: Duration::from_millis(1), @@ -467,8 +496,15 @@ pub async fn backoff_on_error() { jitter: 0.0, max_delay: Duration::from_millis(1), }, + host: "localhost".to_string(), + port: 1234, }; - let mut resolver = super::DnsResolver::new(target, opts, dns_opts); + let dns_client = opts + .runtime + .get_dns_resolver(rt::ResolverOptions { server_addr: None }) + .unwrap(); + + let mut resolver = DnsResolver::new(dns_client, opts, dns_opts); let (update_tx, mut update_rx) = mpsc::unbounded_channel(); let mut channel_controller = FakeChannelController { @@ -478,19 +514,19 @@ pub async fn backoff_on_error() { // As the channel returned an error to the resolver, the resolver will // backoff and re-attempt resolution. - for i in 0..5 { - work_rx.recv().await.unwrap(); + for _ in 0..5 { + let _ = work_rx.recv().await.unwrap(); resolver.work(&mut channel_controller); let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); } // This time the channel accepts the resolver update. channel_controller.update_result = Ok(()); - work_rx.recv().await.unwrap(); + let _ = work_rx.recv().await.unwrap(); resolver.work(&mut channel_controller); let update = update_rx.recv().await.unwrap(); - assert!(update.endpoints.unwrap().len() > 1); + assert_eq!(update.endpoints.unwrap().len() > 1, true); // Since the channel controller returns Ok(), the resolver will stop // producing more updates. diff --git a/grpc/src/client/name_resolution/mod.rs b/grpc/src/client/name_resolution/mod.rs index bb1337dac..47f33bde6 100644 --- a/grpc/src/client/name_resolution/mod.rs +++ b/grpc/src/client/name_resolution/mod.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ @@ -23,38 +29,59 @@ //! a service. 
use core::fmt; -use super::service_config::{self, ServiceConfig}; -use crate::{attributes::Attributes, rt}; +use super::service_config::ServiceConfig; +use crate::{attributes::Attributes, byte_str::ByteStr, rt::Runtime}; use std::{ - error::Error, fmt::{Display, Formatter}, - hash::Hash, + hash::{Hash, Hasher}, str::FromStr, sync::Arc, }; -use tokio::sync::Notify; mod backoff; mod dns; -mod passthrough; mod registry; -pub use registry::{ResolverRegistry, GLOBAL_RESOLVER_REGISTRY}; +pub use registry::global_registry; +use url::Url; +/// Target represents a target for gRPC, as specified in: +/// https://github.com/grpc/grpc/blob/master/doc/naming.md. +/// It is parsed from the target string that gets passed during channel creation +/// by the user. gRPC passes it to the resolver and the balancer. +/// +/// If the target follows the naming spec, and the parsed scheme is registered +/// with gRPC, we will parse the target string according to the spec. If the +/// target does not contain a scheme or if the parsed scheme is not registered +/// (i.e. no corresponding resolver available to resolve the endpoint), we will +/// apply the default scheme, and will attempt to reparse it. +#[derive(Debug, Clone)] pub struct Target { - url: url::Url, + url: Url, } impl FromStr for Target { type Err = String; fn from_str(s: &str) -> Result { - match s.parse::() { + match s.parse::() { Ok(url) => Ok(Target { url }), Err(err) => Err(err.to_string()), } } } +impl Display for Target { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}://{}{}", + self.scheme(), + self.authority_host_port(), + self.path() + ) + } +} + impl From for Target { fn from(url: url::Url) -> Self { Target { url } @@ -82,7 +109,7 @@ impl Target { } /// The port part of the authority. - pub fn aythority_port(&self) -> Option { + pub fn authority_port(&self) -> Option { self.url.port() } @@ -90,7 +117,7 @@ impl Target { /// in the authority. 
pub fn authority_host_port(&self) -> String { let host = self.authority_host(); - let port = self.aythority_port(); + let port = self.authority_port(); if let Some(port) = port { format!("{}:{}", host, port) } else { @@ -104,21 +131,9 @@ impl Target { } } -impl Display for Target { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}//{}/{}", - self.scheme(), - self.authority_host_port(), - self.path() - ) - } -} - /// A name resolver factory pub trait ResolverBuilder: Send + Sync { - /// Builds a name resolver instance, or returns an error. + /// Builds a name resolver instance. /// /// Note that build must not fail. Instead, an erroring Resolver may be /// returned that calls ChannelController.update() with an Err value. @@ -128,11 +143,15 @@ pub trait ResolverBuilder: Send + Sync { fn scheme(&self) -> &str; /// Returns the default authority for a channel using this name resolver - /// and target. This is typically the same as the service's name. By - /// default, the default_authority method automatically returns the path - /// portion of the target URI, with the leading prefix removed. - fn default_authority(&self, uri: &Target) -> String { - let path = uri.path(); + /// and target. This refers to the *dataplane authority* — the value used + /// in the `:authority` header of HTTP/2 requests — and not to be confused + /// with the authority portion of the target URI, which typically specifies + /// the name of an external server used for name resolution. + /// + /// By default, this method returns the path portion of the target URI, + /// with the leading prefix removed. + fn default_authority(&self, target: &Target) -> String { + let path = target.path(); path.strip_prefix("/").unwrap_or(path).to_string() } @@ -145,12 +164,18 @@ pub trait ResolverBuilder: Send + Sync { /// name resolver. #[non_exhaustive] pub struct ResolverOptions { - /// Authority is the effective authority of the channel for which the - /// resolver is built. 
+ /// The authority that will be used for the channel by default. This refers + /// to the `:authority` value sent in HTTP/2 requests — the dataplane + /// authority — and not the authority portion of the target URI, which is + /// typically used to identify the name resolution server. + /// + /// This value is either the result of the `default_authority` method of + /// this `ResolverBuilder`, or another string if the channel was explicitly + /// configured to override the default. pub authority: String, /// The runtime which provides utilities to do async work. - pub runtime: Arc, + pub runtime: Arc, /// A hook into the channel's work scheduler that allows the Resolver to /// request the ability to perform operations on the ChannelController. @@ -167,21 +192,23 @@ pub trait WorkScheduler: Send + Sync { /// Resolver watches for the updates on the specified target. /// Updates include address updates and service config updates. -pub trait Resolver: Send { - /// Asks the resolver to obtain an updated resolver result, if - /// applicable. +// This trait may not need the Sync sub-trait if the channel implementation can +// ensure that the resolver is accessed serially. The sub-trait can be removed +// in that case. +pub trait Resolver: Send + Sync { + /// Asks the resolver to obtain an updated resolver result, if applicable. /// - /// This is useful for pull-based implementations to decide when to - /// re-resolve. However, the implementation is not required to - /// re-resolve immediately upon receiving this call; it may instead - /// elect to delay based on some configured minimum time between - /// queries, to avoid hammering the name service with queries. + /// This is useful for polling resolvers to decide when to re-resolve. 
+ /// However, the implementation is not required to re-resolve immediately + /// upon receiving this call; it may instead elect to delay based on some + /// configured minimum time between queries, to avoid hammering the name + /// service with queries. /// - /// For push-based implementations, this may be a no-op. + /// For watch based resolvers, this may be a no-op. fn resolve_now(&mut self); - /// Called serially by the work scheduler to do work after the helper's - /// schedule_work method is called. + /// Called serially by the channel to provide access to the + /// `ChannelController`. fn work(&mut self, channel_controller: &mut dyn ChannelController); } @@ -207,13 +234,14 @@ pub trait ChannelController: Send + Sync { pub struct ResolverUpdate { /// Attributes contains arbitrary data about the resolver intended for /// consumption by the load balancing policy. - pub attributes: Arc, + pub attributes: Attributes, - /// Endpoints is the latest set of resolved endpoints for the target. + /// A list of endpoints which each identify a logical host serving the + /// service indicated by the target URI. pub endpoints: Result, String>, - /// service_config contains the result from parsing the latest service - /// config. If it is None, it indicates no service config is present or + /// The service config which the client should use for communicating with + /// the service. If it is None, it indicates no service config is present or /// the resolver does not provide service configs. 
pub service_config: Result, String>, @@ -229,10 +257,10 @@ pub struct ResolverUpdate { impl Default for ResolverUpdate { fn default() -> Self { ResolverUpdate { - service_config: Ok(None), - attributes: Arc::default(), - endpoints: Ok(Vec::default()), - resolution_note: None, + service_config: Ok(Default::default()), + attributes: Default::default(), + endpoints: Ok(Default::default()), + resolution_note: Default::default(), } } } @@ -251,9 +279,15 @@ pub struct Endpoint { pub attributes: Attributes, } -#[non_exhaustive] -#[derive(Debug, Clone, Default, PartialOrd, Ord)] +impl Hash for Endpoint { + fn hash(&self, state: &mut H) { + self.addresses.hash(state); + } +} + /// An Address is an identifier that indicates how to connect to a server. +#[non_exhaustive] +#[derive(Debug, Clone, Default, Ord, PartialOrd)] pub struct Address { /// The network type is used to identify what kind of transport to create /// when connecting to this address. Typically TCP_IP_ADDRESS_TYPE. @@ -261,7 +295,7 @@ pub struct Address { /// The address itself is passed to the transport in order to create a /// connection to it. - pub address: String, + pub address: ByteStr, /// Attributes contains arbitrary data about this address intended for /// consumption by the subchannel. @@ -276,20 +310,6 @@ impl PartialEq for Address { } } -impl Eq for Endpoint {} - -impl PartialEq for Endpoint { - fn eq(&self, other: &Self) -> bool { - self.addresses == other.addresses - } -} - -impl Hash for Endpoint { - fn hash(&self, state: &mut H) { - self.addresses.hash(state); - } -} - impl Hash for Address { fn hash(&self, state: &mut H) { self.network_type.hash(state); @@ -299,7 +319,7 @@ impl Hash for Address { impl Display for Address { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}:{}", self.network_type, self.address) + write!(f, "{}:{}", self.network_type, self.address.to_string()) } } @@ -307,6 +327,21 @@ impl Display for Address { /// via TCP/IP. 
pub static TCP_IP_NETWORK_TYPE: &str = "tcp"; +// A resolver that returns the same result every time its work method is called. +// It can be used to return an error to the channel when a resolver fails to +// build. +struct NopResolver { + pub update: ResolverUpdate, +} + +impl Resolver for NopResolver { + fn resolve_now(&mut self) {} + + fn work(&mut self, channel_controller: &mut dyn ChannelController) { + let _ = channel_controller.update(self.update.clone()); + } +} + #[cfg(test)] mod test { use super::Target; @@ -321,6 +356,7 @@ mod test { want_port: Option, want_host_port: &'static str, want_path: &'static str, + want_str: &'static str, } let test_cases = vec![ TestCase { @@ -330,6 +366,7 @@ mod test { want_host: "", want_port: None, want_path: "/grpc.io", + want_str: "dns:///grpc.io", }, TestCase { input: "dns://8.8.8.8:53/grpc.io/docs", @@ -338,6 +375,7 @@ mod test { want_host: "8.8.8.8", want_port: Some(53), want_path: "/grpc.io/docs", + want_str: "dns://8.8.8.8:53/grpc.io/docs", }, TestCase { input: "unix:path/to/file", @@ -346,6 +384,7 @@ mod test { want_host: "", want_port: None, want_path: "path/to/file", + want_str: "unix://path/to/file", }, TestCase { input: "unix:///run/containerd/containerd.sock", @@ -354,6 +393,7 @@ mod test { want_host: "", want_port: None, want_path: "/run/containerd/containerd.sock", + want_str: "unix:///run/containerd/containerd.sock", }, ]; @@ -361,9 +401,10 @@ mod test { let target: Target = tc.input.parse().unwrap(); assert_eq!(target.scheme(), tc.want_scheme); assert_eq!(target.authority_host(), tc.want_host); - assert_eq!(target.aythority_port(), tc.want_port); + assert_eq!(target.authority_port(), tc.want_port); assert_eq!(target.authority_host_port(), tc.want_host_port); assert_eq!(target.path(), tc.want_path); + assert_eq!(&target.to_string(), tc.want_str); } } } diff --git a/grpc/src/client/name_resolution/registry.rs b/grpc/src/client/name_resolution/registry.rs index f484e7d68..aeb0331c9 100644 --- 
a/grpc/src/client/name_resolution/registry.rs +++ b/grpc/src/client/name_resolution/registry.rs @@ -1,41 +1,80 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + use std::{ collections::HashMap, - str::FromStr, - sync::{Arc, Mutex}, + sync::{Arc, Mutex, OnceLock}, }; -use once_cell::sync::Lazy; -use tokio::sync::Notify; +use super::ResolverBuilder; -use super::{Resolver, ResolverBuilder, ResolverOptions}; +static GLOBAL_RESOLVER_REGISTRY: OnceLock = OnceLock::new(); /// A registry to store and retrieve name resolvers. Resolvers are indexed by /// the URI scheme they are intended to handle. #[derive(Default)] pub struct ResolverRegistry { - m: Arc>>>, + inner: Arc>>>, } impl ResolverRegistry { /// Construct an empty name resolver registry. fn new() -> Self { - Self { m: Arc::default() } + Self { + inner: Arc::default(), + } } /// Add a name resolver into the registry. 
builder.scheme() will - // be used as the scheme registered with this builder. If multiple - // resolvers are registered with the same name, the one registered last - // will take effect. Panics if the given scheme contains uppercase - // characters. + /// be used as the scheme registered with this builder. If multiple + /// resolvers are registered with the same name, the one registered last + /// will take effect. + /// + /// # Panics + /// + /// Panics if the given scheme contains uppercase characters. pub fn add_builder(&self, builder: Box) { + self.try_add_builder(builder).unwrap(); + } + + /// Add a name resolver into the registry. builder.scheme() will + /// be used as the scheme registered with this builder. If multiple + /// resolvers are registered with the same name, the one registered last + /// will take effect. + pub fn try_add_builder(&self, builder: Box) -> Result<(), String> { let scheme = builder.scheme(); if scheme.chars().any(|c| c.is_ascii_uppercase()) { - panic!("Scheme must not contain uppercase characters: {}", scheme); + return Err(format!( + "Scheme must not contain uppercase characters: {}", + scheme + )); } - self.m + self.inner .lock() .unwrap() .insert(scheme.to_string(), Arc::from(builder)); + return Ok(()); } /// Returns the resolver builder registered for the given scheme, if any. @@ -43,13 +82,15 @@ impl ResolverRegistry { /// The provided scheme is case-insensitive; any uppercase characters /// will be converted to lowercase before lookup. pub fn get(&self, scheme: &str) -> Option> { - self.m + self.inner .lock() .unwrap() - .get(&scheme.to_lowercase()).cloned() + .get(&scheme.to_lowercase()) + .cloned() } } /// Global registry for resolver builders. 
-pub static GLOBAL_RESOLVER_REGISTRY: std::sync::LazyLock = - std::sync::LazyLock::new(ResolverRegistry::new); +pub fn global_registry() -> &'static ResolverRegistry { + GLOBAL_RESOLVER_REGISTRY.get_or_init(ResolverRegistry::new) +} diff --git a/grpc/src/client/service_config.rs b/grpc/src/client/service_config.rs index 87b2ecb7e..da268ca33 100644 --- a/grpc/src/client/service_config.rs +++ b/grpc/src/client/service_config.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ use std::{any::Any, error::Error, sync::Arc}; @@ -20,11 +26,11 @@ use std::{any::Any, error::Error, sync::Arc}; /// An in-memory representation of a service config, usually provided to gRPC as /// a JSON object. #[derive(Debug, Default, Clone)] -pub struct ServiceConfig; +pub(crate) struct ServiceConfig; /// A convenience wrapper for an LB policy's configuration object. #[derive(Debug)] -pub struct LbConfig { +pub(crate) struct LbConfig { config: Arc, } diff --git a/grpc/src/client/subchannel.rs b/grpc/src/client/subchannel.rs index 433dbc481..d325257e1 100644 --- a/grpc/src/client/subchannel.rs +++ b/grpc/src/client/subchannel.rs @@ -350,7 +350,7 @@ impl InternalSubchannel { _ = tokio::time::sleep(min_connect_timeout) => { let _ = state_machine_tx.send(SubchannelStateMachineEvent::ConnectionTimedOut); } - result = transport.connect(address.clone()) => { + result = transport.connect(address.to_string().clone()) => { match result { Ok(s) => { let _ = state_machine_tx.send(SubchannelStateMachineEvent::ConnectionSucceeded(Arc::from(s))); @@ -457,7 +457,7 @@ impl SubchannelKey { impl Display for SubchannelKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.address.address) + write!(f, "{}", self.address.address.to_string()) } } @@ -466,6 +466,7 @@ impl Debug for SubchannelKey { write!(f, "{}", self.address) } } + pub(super) struct InternalSubchannelPool { subchannels: RwLock>>, } diff --git a/grpc/src/inmemory/mod.rs b/grpc/src/inmemory/mod.rs index ad5fab27c..6884f4d68 100644 --- a/grpc/src/inmemory/mod.rs +++ b/grpc/src/inmemory/mod.rs @@ -10,8 +10,8 @@ use std::{ use crate::{ client::{ name_resolution::{ - self, Address, ChannelController, 
Endpoint, Resolver, ResolverBuilder, ResolverOptions, - ResolverUpdate, GLOBAL_RESOLVER_REGISTRY, + self, global_registry, Address, ChannelController, Endpoint, Resolver, ResolverBuilder, + ResolverOptions, ResolverUpdate, }, transport::{self, ConnectedTransport, GLOBAL_TRANSPORT_REGISTRY}, }, @@ -124,7 +124,7 @@ static INMEMORY_NETWORK_TYPE: &str = "inmemory"; pub fn reg() { GLOBAL_TRANSPORT_REGISTRY.add_transport(INMEMORY_NETWORK_TYPE, ClientTransport::new()); - GLOBAL_RESOLVER_REGISTRY.add_builder(Box::new(InMemoryResolverBuilder)); + global_registry().add_builder(Box::new(InMemoryResolverBuilder)); } struct InMemoryResolverBuilder; @@ -159,7 +159,7 @@ impl Resolver for NopResolver { for addr in LISTENERS.lock().unwrap().keys() { addresses.push(Address { network_type: INMEMORY_NETWORK_TYPE, - address: addr.clone(), + address: addr.clone().into(), ..Default::default() }); } diff --git a/grpc/src/lib.rs b/grpc/src/lib.rs index b376c7915..2f35aaf2d 100644 --- a/grpc/src/lib.rs +++ b/grpc/src/lib.rs @@ -2,17 +2,23 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
* - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ @@ -25,7 +31,6 @@ //! [gRPC]: https://grpc.io #![allow(dead_code, unused_variables, unused_imports)] -pub mod attributes; pub mod client; pub mod codec; pub mod credentials; @@ -33,3 +38,6 @@ pub mod inmemory; pub mod rt; pub mod server; pub mod service; + +pub(crate) mod attributes; +pub(crate) mod byte_str; diff --git a/grpc/src/rt/mod.rs b/grpc/src/rt/mod.rs index fb27395a6..9f862a04b 100644 --- a/grpc/src/rt/mod.rs +++ b/grpc/src/rt/mod.rs @@ -2,24 +2,30 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ -use std::{future::Future, net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; - use ::tokio::io::{AsyncRead, AsyncWrite}; +use std::{future::Future, net::SocketAddr, pin::Pin, time::Duration}; + pub(crate) mod hyper_wrapper; pub mod tokio; @@ -30,7 +36,7 @@ pub mod tokio; /// time-based operations such as sleeping. It provides a uniform interface /// that can be implemented for various async runtimes, enabling pluggable /// and testable infrastructure. 
-pub trait Runtime: Send + Sync { +pub(super) trait Runtime: Send + Sync { /// Spawns the given asynchronous task to run in the background. fn spawn( &self, @@ -52,15 +58,17 @@ pub trait Runtime: Send + Sync { ) -> Pin, String>> + Send>>; } -pub trait Sleep: Send + Sync + Future {} +/// A future that resolves after a specified duration. +pub(super) trait Sleep: Send + Sync + Future {} -pub trait TaskHandle: Send + Sync { +pub(super) trait TaskHandle: Send + Sync { /// Abort the associated task. fn abort(&self); } +/// A trait for asynchronous DNS resolution. #[tonic::async_trait] -pub trait DnsResolver: Send + Sync { +pub(super) trait DnsResolver: Send + Sync { /// Resolve an address async fn lookup_host_name(&self, name: &str) -> Result, String>; /// Perform a TXT record lookup. If a txt record contains multiple strings, @@ -69,10 +77,10 @@ pub trait DnsResolver: Send + Sync { } #[derive(Default)] -pub struct ResolverOptions { +pub(super) struct ResolverOptions { /// The address of the DNS server in "IP:port" format. If None, the /// system's default DNS server will be used. - pub server_addr: Option, + pub(super) server_addr: Option, } #[derive(Default)] diff --git a/grpc/src/rt/tokio/hickory_resolver.rs b/grpc/src/rt/tokio/hickory_resolver.rs index 13b20b010..343aa2315 100644 --- a/grpc/src/rt/tokio/hickory_resolver.rs +++ b/grpc/src/rt/tokio/hickory_resolver.rs @@ -2,32 +2,46 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. * */ -use hickory_resolver::config::{NameServerConfigGroup, ResolverConfig, ResolverOpts}; +use std::net::IpAddr; + +use hickory_resolver::{ + config::{LookupIpStrategy, NameServerConfigGroup, ResolverConfig, ResolverOpts}, + name_server::TokioConnectionProvider, + TokioResolver, +}; + +use crate::rt::{self, ResolverOptions}; /// A DNS resolver that uses hickory with the tokio runtime. This supports txt /// lookups in addition to A and AAAA record lookups. 
It also supports using /// custom DNS servers. -pub struct DnsResolver { +pub(super) struct DnsResolver { resolver: hickory_resolver::TokioResolver, } #[tonic::async_trait] -impl super::DnsResolver for DnsResolver { - async fn lookup_host_name(&self, name: &str) -> Result, String> { +impl rt::DnsResolver for DnsResolver { + async fn lookup_host_name(&self, name: &str) -> Result, String> { let response = self .resolver .lookup_ip(name) @@ -56,21 +70,21 @@ impl super::DnsResolver for DnsResolver { } impl DnsResolver { - pub fn new(opts: super::ResolverOptions) -> Result { + pub(super) fn new(opts: ResolverOptions) -> Result { let builder = if let Some(server_addr) = opts.server_addr { - let provider = hickory_resolver::name_server::TokioConnectionProvider::default(); + let provider = TokioConnectionProvider::default(); let name_servers = NameServerConfigGroup::from_ips_clear( &[server_addr.ip()], server_addr.port(), true, ); let config = ResolverConfig::from_parts(None, vec![], name_servers); - hickory_resolver::TokioResolver::builder_with_config(config, provider) + TokioResolver::builder_with_config(config, provider) } else { - hickory_resolver::TokioResolver::builder_tokio().map_err(|err| err.to_string())? + TokioResolver::builder_tokio().map_err(|err| err.to_string())? }; let mut resolver_opts = ResolverOpts::default(); - resolver_opts.ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6; + resolver_opts.ip_strategy = LookupIpStrategy::Ipv4AndIpv6; Ok(DnsResolver { resolver: builder.with_options(resolver_opts).build(), }) diff --git a/grpc/src/rt/tokio/mod.rs b/grpc/src/rt/tokio/mod.rs index f26729c9f..dd5bb6b70 100644 --- a/grpc/src/rt/tokio/mod.rs +++ b/grpc/src/rt/tokio/mod.rs @@ -2,41 +2,52 @@ * * Copyright 2025 gRPC authors. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * http://www.apache.org/licenses/LICENSE-2.0 + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
* */ -use std::{future::Future, net::SocketAddr, pin::Pin}; +use std::{ + future::Future, + net::{IpAddr, SocketAddr}, + pin::Pin, + time::Duration, +}; -use futures_util::TryFutureExt; use tokio::{ io::{AsyncRead, AsyncWrite}, - net::{TcpSocket, TcpStream}, + net::TcpStream, + task::JoinHandle, }; use super::{DnsResolver, ResolverOptions, Runtime, Sleep, TaskHandle}; -#[cfg(feature = "hickory_dns")] +#[cfg(feature = "dns")] mod hickory_resolver; /// A DNS resolver that uses tokio::net::lookup_host for resolution. It only /// supports host lookups. -pub struct TokioDefaultDnsResolver {} +struct TokioDefaultDnsResolver {} #[tonic::async_trait] impl DnsResolver for TokioDefaultDnsResolver { - async fn lookup_host_name(&self, name: &str) -> Result, String> { - let name_with_port = match name.parse::() { + async fn lookup_host_name(&self, name: &str) -> Result, String> { + let name_with_port = match name.parse::() { Ok(ip) => SocketAddr::new(ip, 0).to_string(), Err(_) => format!("{}:0", name), }; @@ -49,13 +60,13 @@ impl DnsResolver for TokioDefaultDnsResolver { } async fn lookup_txt(&self, _name: &str) -> Result, String> { - Err("TXT record lookup unavailable. Enable the optional 'hickory_dns' feature to enable service config lookups.".to_string()) + Err("TXT record lookup unavailable. 
Enable the optional 'dns' feature to enable service config lookups.".to_string()) } } -pub struct TokioRuntime {} +pub(crate) struct TokioRuntime {} -impl TaskHandle for tokio::task::JoinHandle<()> { +impl TaskHandle for JoinHandle<()> { fn abort(&self) { self.abort() } @@ -72,17 +83,17 @@ impl Runtime for TokioRuntime { } fn get_dns_resolver(&self, opts: ResolverOptions) -> Result, String> { - #[cfg(feature = "hickory_dns")] + #[cfg(feature = "dns")] { Ok(Box::new(hickory_resolver::DnsResolver::new(opts)?)) } - #[cfg(not(feature = "hickory_dns"))] + #[cfg(not(feature = "dns"))] { Ok(Box::new(TokioDefaultDnsResolver::new(opts)?)) } } - fn sleep(&self, duration: std::time::Duration) -> Pin> { + fn sleep(&self, duration: Duration) -> Pin> { Box::pin(tokio::time::sleep(duration)) } @@ -112,7 +123,7 @@ impl Runtime for TokioRuntime { impl TokioDefaultDnsResolver { pub fn new(opts: ResolverOptions) -> Result { if opts.server_addr.is_some() { - return Err("Custom DNS server are not supported, enable optional feature 'hickory_dns' to enable support.".to_string()); + return Err("Custom DNS server are not supported, enable optional feature 'dns' to enable support.".to_string()); } Ok(TokioDefaultDnsResolver {}) } diff --git a/grpc/src/service.rs b/grpc/src/service.rs index 370946400..7b9401ed9 100644 --- a/grpc/src/service.rs +++ b/grpc/src/service.rs @@ -1,12 +1,32 @@ -use std::{any::Any, pin::Pin, time::Instant}; +/* + * + * Copyright 2025 gRPC authors. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +use std::{any::Any, pin::Pin}; use futures_core::Stream; -use tokio::sync::mpsc::{self, Receiver, Sender}; use tonic::{async_trait, Request as TonicRequest, Response as TonicResponse, Status}; -#[derive(Debug)] -struct TODO; - pub type Request = TonicRequest> + Send + Sync>>>; pub type Response = TonicResponse, Status>> + Send + Sync>>>; @@ -16,4 +36,5 @@ pub trait Service: Send + Sync { async fn call(&self, method: String, request: Request) -> Response; } +// TODO: define methods that will allow serialization/deserialization. 
pub trait Message: Any + Send + Sync {} diff --git a/interop/Cargo.toml b/interop/Cargo.toml index 875de7d50..9994895a4 100644 --- a/interop/Cargo.toml +++ b/interop/Cargo.toml @@ -16,12 +16,10 @@ path = "src/bin/server.rs" async-stream = "0.3" strum = {version = "0.27", features = ["derive"]} pico-args = {version = "0.5", features = ["eq-separator"]} -console = "0.15" +console = "0.16" http = "1" -http-body = "1" http-body-util = "0.1" -hyper = "1" -prost = "0.13" +prost = "0.14" tokio = {version = "1.0", features = ["rt-multi-thread", "time", "macros"]} tokio-stream = "0.1" tonic = {path = "../tonic", features = ["tls-ring"]} diff --git a/interop/build.rs b/interop/build.rs index 7783987a9..295d5e2a3 100644 --- a/interop/build.rs +++ b/interop/build.rs @@ -4,5 +4,5 @@ fn main() { tonic_build::compile_protos(proto).unwrap(); // prevent needing to rebuild if files (or deps) haven't changed - println!("cargo:rerun-if-changed={}", proto); + println!("cargo:rerun-if-changed={proto}"); } diff --git a/interop/data/ca.pem b/interop/data/ca.pem index 841147934..4176c0370 100644 --- a/interop/data/ca.pem +++ b/interop/data/ca.pem @@ -1,20 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDRjCCAi6gAwIBAgIQQS24If9oGkeiIDhCX3LptTANBgkqhkiG9w0BAQsFADA9 +MIIDRjCCAi6gAwIBAgIQd5pnuFdwgGxb4RiClYEPMTANBgkqhkiG9w0BAQsFADA9 MQ4wDAYDVQQKEwVUb2tpbzEQMA4GA1UECxMHVGVzdGluZzEZMBcGA1UEAxMQVG9u -aWMgVGVzdGluZyBDQTAeFw0yNDExMTMxOTQ0MzBaFw0zNDExMTExOTQ0MzBaMD0x +aWMgVGVzdGluZyBDQTAeFw0yNTA1MDExNzQyNThaFw0zNTA0MjkxNzQyNThaMD0x DjAMBgNVBAoTBVRva2lvMRAwDgYDVQQLEwdUZXN0aW5nMRkwFwYDVQQDExBUb25p -YyBUZXN0aW5nIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuRMq -OMDsXvkEc/ArFxHhNNd1qIRhgPElLR/de091WVEKGGQI7OEJLE5/dfD2RMe0PdvZ -tEhURM/SUkreYeJhC7AFbAVM6cDC7Lj+GB1v63EbpFTDkkrJ5+GN7DtID/FNYg4M -sBlHCMys2ZWByAy6/fBZ4PKyJbMX4gtySLAyCTqI/BV1TC/7tgxiNrSNv/MZiqqO -FOdAfpkdb/mGWkxOB+JYWT8QBjIWcWaoWJcwmlY4ZC6U5aaNxO3VKDdGyghKkPS7 -nDAMe5H3Cl/rR1TKY3A9TVcE4qQ514647NQpCoOJZ39PbXJy+S4v3qI61IvFBPFh 
-C3tgZABh8h9dO7ddGQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAgQwDwYDVR0TAQH/ -BAUwAwEB/zAdBgNVHQ4EFgQU6k4lLdVI7YCNA9eQBb8hO3nIFVQwDQYJKoZIhvcN -AQELBQADggEBADhJOxK2tPlai1pSwT0ud7oXXfAQpI1PZ9FxWR9qx1UsTLb3sfTW -+20RQdguGXTby5wkHIiVJ6vAEDRd8X9oOf8vk2zvnmWLgiDdOh3e3OkOme6Qs+V9 -mXIA8JZnYGaqmAPcmZU5uY2lrf0oXURqb9ZNL5DCc1yJ1rVC3jCQLZZ1v6GGRNB5 -YhTWlQYUcu2nw8fN1scrV9322gU5siJzrNrjO9yabIYZyMpwcNwarvoxcVT8zLM5 -ZLUTjG2Yr8PJ13Bn2dyw8mDuL0ixXU34vQNIHBk1UmbzyWL9mDfu9RfpIDIDkEzK -sO/HXgh4vzETaIhkEuW9porYX5d1QyVED8I= +YyBUZXN0aW5nIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2C37 +LVCs4RfNdwv8NMZfIdFNqrUdwzXZ+a5B7Pee1nOL+JD9feOGn1qGZI1ZgFMqVygN +ejSkzlbouN9RAGgyBmOFFo3oEc+nz7kPrezBLoM3oVgNzhEixz2IQoafoZX3j48Y +fpGYmrTHUp4MAwUAt6Zb+kD7YGqD8//I5OMM4Y5R8yuYGsJHUUSZqYfgXCk0ZvVG +EX7zyr31cVLqto1vpuv5Uvp6WX5oGgbZVB0wvlqs9Ak+dblWBZQIsrUPU8kn/6kx +HilF8Lw24dRXr5oveFDMdD/n4sIh7Gr/O+VGH83gP/PawXy0WWn5qGAhdx+P99jI +UGAWNetu4vGgASLFkwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAgQwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU4AI0rUoGKFxe0gXIYujrhnuNsocwDQYJKoZIhvcN +AQELBQADggEBAEtSrTgz+suVMDRsdvxG7+BAcXfVc8pmou3KIFbDtIyog2BEU5nU +Z+zBqrVgbGv9B4D7XLcZgTShY17cvJP8QpUwT/gzI9uR8Lig9JGF7n1K43+aiAk9 +s7H8e74rwyPX6mRmuznd1sJdDsc5lohUPpZVI+7pRywedQw+QG6/n2cVvR0k0Txh +pF1XBpzuFA5t5uqW/v/QFqfGEuIDDMdW2JQSEB7UyH4V2yWswoYb/uf/xoNXWWqs +Y6RVSp6qVW8748rPPwmLaN8hHGIUNUnilQIXr67bX8i3FjoLHhQvKqUEKciXJWj9 +ssGOvq0QoVZNPltcZp9yID3W2kyxv6Hq8VA= -----END CERTIFICATE----- diff --git a/interop/data/server1.key b/interop/data/server1.key index 064de36c8..e7666ab67 100644 --- a/interop/data/server1.key +++ b/interop/data/server1.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA3IN57h6uzOii4giaDPPMhjc1Dm0gpX4GiwKCeo8jhhN37YjK -OLCLGEYOoyBoMiYr/A5neJ6eixpp9qHGlq3youOHW81dEGqqtasTH7jdXFbnIY72 -Nz8DftzxD8XXrQu8ZjzHWRbFcmvApWEt/7GE1mhUAIDURTdf4JkqeOCVRnGWNat1 -owjypqjhpXGYG/TTfqT+Tw5987O3ZotBjcv//WLEmx1GhKKYKCFeDiRH9ozEtInH -vBOP7u2kE1kU+7V1ijG/vdHSS5/E/KrXMVqxzoES58XNtUqsRH09Gji8ad/cD5GC 
-k/3f9ZzUFVO33pnnyY5by6QbBevKQe1Jki0WTwIDAQABAoIBAHXbI0jcR0qnL58l -P8iaaP529Tlvo9ovgCm9vqToafEX6KogyQwBd2YS03HmOSpMcoe13yF9jXkFNgsm -LbCM6bibaNXs7cd/axvLgl4a/NyEaeXqtbeTSzf7uC9Y60vGkPwHkfgQjpj39C+v -v9kANOIvQm4+bLVNwkWVNzkBt2a9AlyQJYaeKLWkVKPeoDF9UpdS9CovMQSJ5kZ/ -3I1old4C/AwC7WyyvNikSKwFGH+Sf/2v3RwwsTz4OJbi208A9riwfNJ4YMYC5vFQ -Y28xOPSYhXjuHMeKZV6KP5WKr+bELOzoj1zl1x3SZSZbTszAi8Gz1FTXRVPpCGOQ -HrK+RakCgYEA+kv99xw+zM+YvXYdlj5dF/EofSiD7eU+ASQ64+gnfhZiUeAWqssD -4VK6sS4DYlS0h1TkLtrO3iCYOUD+S9RsqOzUXft5a1wqWYiBa9QOpa6MV3s8DeS9 -wBK8BzMOtrPEsqKwrOe302MV2Aw1GabNVi0aLQPZQvYZBt7Hhe4eXasCgYEA4YnB -W8aPVS3953pQHH9Z6l39oy25GktRzHH3HpnFuSTgumvOvOE9LSLKmMoDpN2Krv4T -1cK2WwLOb+SjOkcY0A1j4+AVLsP9PJbRJbsP7OOVRB1Do7kFzQ9bEGHFpGD3lAkz -ClQSRkivLMsd86m1ivc80cmhNnOgpygteRJcHe0CgYAhhlAr6wKWWC/zIIDyAMRj -Uo/Dw8t3776QVJP2tr+jacgdg1BF7A9G/Ne4p5sYbpQHlF1D0Vbn9aGt+YCWE4vC -TIZdWDN5J80cVOZQ1QRpOKnfhcgTbFHmChxZMoOEASwVaSkU36yFib4BRBFQsEDM -jBn3cY6GI4RSoUBENhKnJQKBgEYAUKBgl5ozhSv0XasKp+jDNXcROPN9Ty0qbi30 -Qlc9p/aUgX1EV42Lz9/uS4U/Mc0wlQ1yutCypUo7Z6It8PiaP1e59DkooY/Nq6qP -TdkTpf+XKahGRBOqYXRLNGHZqt4qoMni4C0qYByCCpDXKr6wEBN5Bm11I/bd1IdQ -eIDdAoGAKAS/R07REX3klh/TV31YdfhaQfrhtVLQ4LArfEE13XOHF2zILkO849Gf -XpKtcIXDh9JVK7XagreHnYThBigEnmQckwfc1AoJf/KmCa+PSrsDMTdR0IMHIcr3 -zgchjL3chUpgvj2ckaqXf4qxUml0Lgm/qF45mJ7ut5k9TcKiKyY= +MIIEpAIBAAKCAQEA7iJJ8gLlKsp+r15CR15Iz2rmi3f3OZmA8FZ0hpB5hNkQHfVA +RlC2yawIfHiLO4tpUmjtX8iq3RXPkKPYP5Zfd1BDLdR/2qhd3vFJnRVfoiqTMNOV +3R3+tIm04gDbtGxIDuWL+No/r/KldFxwbLqYTXDOaa145YI2aZ3GZ3P6GFYls6h7 +PeUqlXv7yWx1jfcMIZPeupHwWESYCCLkpvBHFZftWhc/FChUmgr417vmQC2eGwuX +LyRdu+Lv9NmSzsO6A+w8ss62ewC02LXkXAG2Prd1GYuScsq94GcE5lguC5TVkdTm +tQMDmET/KvitG3vLB9AIkKnjZdi1Ml7Ow6dByQIDAQABAoIBAFo7ketrH2z8d855 +mAG0/z/hEOSuG3au7MWk7NiEbBdjrJC9epJqSSjX0AtiHdf9NnZsne2aeuv1NMZo +3ysRDrGGLz5xc9Tl0VQF98/W5nrrSQTKV9IGaJn+SBUPIDEYiqFiZ4xvHozME9eo +o0z/03Acm4o9mj7U/Us95o0SzCRl3QKgAWeSS36Ks0OmDJfNuYBWf+WA7Fte9NUp +yOOm2fGejoge9eMcJY3/7HckrESscMECZMUL1hBbVD939d4S4AvM6YWTErAa9uq9 
+APsXdu5IYglonqw6oc4TtN9bI9gbHKTyiFgi42gM6qcN2ixpQ78ufktLcJLBTLzi +jP5f5cUCgYEA7whtTRG+KN3nAaRy5gU3JDdOIM1tlAVjwtvUIre3sf6p6Bzs0+RL +DVdOidJB+8wnV6hF64+juHS27Y7t4ONt2VRFNmY3yRlb9MwqOYlqGaOOewgY+Gab +ZC4GBKmMRKW0LpeRHghpCeyeRRKr5tkYalyU9/C+mxIFpb0/NZZXh6sCgYEA/wmG +s+npJH2Xs17Yd3wwroZpFAlG2wRCd0IS14brKr+w5czbGis8Pz/LCVrNH+6ZkvoI +gUpTDwY7egt9O2iCIeSeq82Ov+g9WiDa150YTq8av09N7AZ13Na+SU5aNpPwIOEZ +WX8dygNloSh4JDjOhrwigRtcMmYCtpKVS792GFsCgYEA6QEB6rp870E/ya4QAoDa ++4adtgQJ6NxIHs5Cv4Tun6dq4EQx52sGbf7JJDe88kJTp3L0lWbzZP8AwhktcKbB +kbQ/s4N4paL+rGXIU0XMEyoH3Y5LKPh8SO9EFo9fmBsexLwiTXBNU8s/jH1i7Ch7 +UFLnM7mNU4QB1Ungr8/ZivkCgYA6sA2ATz5oOEJ1c0jqzfhB4QpDIxNcCPHmkZzW +XeS11KC3cNmmfvaBM4PcZjm3tGdArCrS3bCZT3zWS9iImDcB56Mfs9C6lo2vtMnH +Pg4+5QqJpY0v2Bi9NelZ4x7dWlOyrTnxH1BSkU+Ms0xaQXw9AwQJo6smqdTMAJU8 +dhWN6wKBgQDRAjpfV77FE32XsBkHyGlaIanxu+gSJVLaHStZLV8qTvn//lnmr5Be +abK4smlwHp5ZnTwqP3gMh/b8vfyPYHdK/2rCKSCEPf/JHtoABsw9Hvkr/N99MZQd +S2l3PYQoQ8smUVYNWhdYvdRER8WFTk5rPX6fEoVne/sArxlwk8+8nw== -----END RSA PRIVATE KEY----- diff --git a/interop/data/server1.pem b/interop/data/server1.pem index 3bcad6b8d..a64fc275e 100644 --- a/interop/data/server1.pem +++ b/interop/data/server1.pem @@ -1,20 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIRAOV5RsOfZrNiE0WwnPv5MSswDQYJKoZIhvcNAQELBQAw +MIIDTDCCAjSgAwIBAgIRAL1ZcIwdi/AfgLm2T41fHO4wDQYJKoZIhvcNAQELBQAw PTEOMAwGA1UEChMFVG9raW8xEDAOBgNVBAsTB1Rlc3RpbmcxGTAXBgNVBAMTEFRv -bmljIFRlc3RpbmcgQ0EwHhcNMjQxMTEzMTk0NDMwWhcNMjkxMTEyMTk0NDMwWjAh +bmljIFRlc3RpbmcgQ0EwHhcNMjUwNTAxMTc0MjU4WhcNMzAwNDMwMTc0MjU4WjAh MR8wHQYDVQQDExZUb25pYyBUZXN0IFNlcnZlciBDZXJ0MIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA3IN57h6uzOii4giaDPPMhjc1Dm0gpX4GiwKCeo8j -hhN37YjKOLCLGEYOoyBoMiYr/A5neJ6eixpp9qHGlq3youOHW81dEGqqtasTH7jd -XFbnIY72Nz8DftzxD8XXrQu8ZjzHWRbFcmvApWEt/7GE1mhUAIDURTdf4JkqeOCV -RnGWNat1owjypqjhpXGYG/TTfqT+Tw5987O3ZotBjcv//WLEmx1GhKKYKCFeDiRH -9ozEtInHvBOP7u2kE1kU+7V1ijG/vdHSS5/E/KrXMVqxzoES58XNtUqsRH09Gji8 
-ad/cD5GCk/3f9ZzUFVO33pnnyY5by6QbBevKQe1Jki0WTwIDAQABo2MwYTATBgNV -HSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFOpOJS3V -SO2AjQPXkAW/ITt5yBVUMBsGA1UdEQQUMBKCECoudGVzdC5nb29nbGUuZnIwDQYJ -KoZIhvcNAQELBQADggEBABdvT3D98vfZxGPXddVG6SYMXLzoWrvF4R0joI2xzaiZ -XlLwo2pPiYY/mUNjNEQCbz9cV4E+EWFzP36YSiFpiQHH65zhgMRMBTlwe6ma+ksJ -kiBwwcDaugDGBrC5YGAypMB+a/8PZiUkODp6S/A4DgNCO4RO+c+lG1QCO335i41A -hVJZVQUKQ2t8SbkVQmugDfZFrp/fDJVRGKwNKWM1d3B40/6TaUc0z9jHBGOzy3GI -ZcO2Di5vqE+gY0oSb4MLlR3PIavPQvNhHCejD6qxIpivOMEpdNv2oAkcfoePuJQ3 -TfqJO8ybedfAGYf/p70Dw45lY7+/aGLIhnQ8nHQmUZE= +AQEFAAOCAQ8AMIIBCgKCAQEA7iJJ8gLlKsp+r15CR15Iz2rmi3f3OZmA8FZ0hpB5 +hNkQHfVARlC2yawIfHiLO4tpUmjtX8iq3RXPkKPYP5Zfd1BDLdR/2qhd3vFJnRVf +oiqTMNOV3R3+tIm04gDbtGxIDuWL+No/r/KldFxwbLqYTXDOaa145YI2aZ3GZ3P6 +GFYls6h7PeUqlXv7yWx1jfcMIZPeupHwWESYCCLkpvBHFZftWhc/FChUmgr417vm +QC2eGwuXLyRdu+Lv9NmSzsO6A+w8ss62ewC02LXkXAG2Prd1GYuScsq94GcE5lgu +C5TVkdTmtQMDmET/KvitG3vLB9AIkKnjZdi1Ml7Ow6dByQIDAQABo2MwYTATBgNV +HSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFOACNK1K +BihcXtIFyGLo64Z7jbKHMBsGA1UdEQQUMBKCECoudGVzdC5nb29nbGUuZnIwDQYJ +KoZIhvcNAQELBQADggEBAJP9h4voqemt8Jiw9lgXKOfZyydIHKvL8oeeNQLnn+Ch +S8D32xRxDeql0oghbTFj1AUxs5X415YgyP4JBoQ8X+L7z3hvSHHildJjbDAM5l+D +jHIr/G6+N6DzLi75WUpZkHFa0ZZ+jHkrxRFq3SsS2hzL93sZ8HoLoEXgGJYcuVYh +duWmy1pv/TW8j3GcRE358rLyIzsAK2tJZOHC3MeDqvITfGfzeHxy/UG2bbGmXU8Z +UoCFUGHhukNuESQFfPxoHsWnsxvCIvcIxGPj4NXSO0WJ9r7/A+UczSr+Vuc55h0E +qrAl9EXltUWTjRZwdIvvas9N3y0ApxkMFNIRmMwUBGE= -----END CERTIFICATE----- diff --git a/interop/src/bin/client.rs b/interop/src/bin/client.rs index 851fa0a3e..01c279200 100644 --- a/interop/src/bin/client.rs +++ b/interop/src/bin/client.rs @@ -32,7 +32,7 @@ async fn main() -> Result<(), Box> { let scheme = if matches.use_tls { "https" } else { "http" }; #[allow(unused_mut)] - let mut endpoint = Endpoint::try_from(format!("{}://localhost:10000", scheme))? + let mut endpoint = Endpoint::try_from(format!("{scheme}://localhost:10000"))? 
.timeout(Duration::from_secs(5)) .concurrency_limit(30); @@ -54,7 +54,7 @@ async fn main() -> Result<(), Box> { let mut failures = Vec::new(); for test_case in test_cases { - println!("{:?}:", test_case); + println!("{test_case:?}:"); let mut test_results = Vec::new(); match test_case { @@ -87,7 +87,7 @@ async fn main() -> Result<(), Box> { } for result in test_results { - println!(" {}", result); + println!(" {result}"); if result.is_failed() { failures.push(result); diff --git a/interop/test.sh b/interop/test.sh index 1814b2df4..c4628d164 100755 --- a/interop/test.sh +++ b/interop/test.sh @@ -47,7 +47,7 @@ TLS_CRT="interop/data/server1.pem" TLS_KEY="interop/data/server1.key" # run the test server -./"${SERVER}" ${ARG} --tls_cert_file $TLS_CRT --tls_key_file $TLS_KEY & +./"${SERVER}" "${ARG}" --tls_cert_file $TLS_CRT --tls_key_file $TLS_KEY & SERVER_PID=$! echo ":; started grpc-go test server." @@ -57,12 +57,12 @@ trap 'echo ":; killing test server"; kill ${SERVER_PID};' EXIT sleep 1 -./target/debug/client --test_case="${JOINED_TEST_CASES}" ${ARG} +./target/debug/client --test_case="${JOINED_TEST_CASES}" "${ARG}" -echo ":; killing test server"; kill ${SERVER_PID}; +echo ":; killing test server"; kill "${SERVER_PID}"; # run the test server -./target/debug/server ${ARG} & +./target/debug/server "${ARG}" & SERVER_PID=$! echo ":; started tonic test server." 
@@ -72,14 +72,24 @@ trap 'echo ":; killing test server"; kill ${SERVER_PID};' EXIT sleep 1 -./target/debug/client --test_case="${JOINED_TEST_CASES}" ${ARG} - -TLS_ARGS="" - -if [ -n "${ARG}" ]; then - TLS_ARGS="--use_tls --use_test_ca --server_host_override=foo.test.google.fr --ca_file=${TLS_CA}" +./target/debug/client --test_case="${JOINED_TEST_CASES}" "${ARG}" + +# Run client test cases +if [ -n "${ARG:-}" ]; then + TLS_ARRAY=( \ + -use_tls \ + -use_test_ca \ + -server_host_override=foo.test.google.fr \ + -ca_file="${TLS_CA}" \ + ) +else + TLS_ARRAY=() fi for CASE in "${TEST_CASES[@]}"; do - interop/bin/client_${OS}_amd64${EXT} --test_case="${CASE}" ${TLS_ARGS} + flags=( "-test_case=${CASE}" ) + flags+=( "${TLS_ARRAY[@]}" ) + + interop/bin/client_"${OS}"_amd64"${EXT}" "${flags[@]}" done + diff --git a/interop/update_binaries.sh b/interop/update_binaries.sh index 79d3ba4ba..c11f93728 100755 --- a/interop/update_binaries.sh +++ b/interop/update_binaries.sh @@ -1,3 +1,4 @@ +#!/bin/bash set -e # This script updates server and client go binaries for interop tests. @@ -30,4 +31,4 @@ for ROLE in $ROLES; do done done -rm -rf ../grpc-go \ No newline at end of file +rm -rf ../grpc-go diff --git a/prepare-release.sh b/prepare-release.sh index 1c5254bd4..365deacb6 100755 --- a/prepare-release.sh +++ b/prepare-release.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Script which automates modifying source version fields, and creating a release # commit and tag. The commit and tag are not automatically pushed, nor are the @@ -14,7 +14,7 @@ fi DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" VERSION="$1" -MINOR="$( echo ${VERSION} | cut -d\. -f1-2 )" +MINOR="$( echo "${VERSION}" | cut -d\. 
-f1-2 )" VERSION_MATCHER="([a-z0-9\\.-]+)" TONIC_CRATE_MATCHER="(tonic|tonic-[a-z]+)" diff --git a/publish-release.sh b/publish-release.sh index f2adaa489..83cc46d61 100755 --- a/publish-release.sh +++ b/publish-release.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Script which automates publishing a crates.io release of the prost crates. diff --git a/tests/ambiguous_methods/Cargo.toml b/tests/ambiguous_methods/Cargo.toml index 53b282153..dbd7b1946 100644 --- a/tests/ambiguous_methods/Cargo.toml +++ b/tests/ambiguous_methods/Cargo.toml @@ -7,7 +7,7 @@ name = "ambiguous_methods" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/compression/Cargo.toml b/tests/compression/Cargo.toml index 7371ca3ae..13616d0d7 100644 --- a/tests/compression/Cargo.toml +++ b/tests/compression/Cargo.toml @@ -9,11 +9,10 @@ bytes = "1" http = "1" http-body = "1" http-body-util = "0.1" -hyper = "1" hyper-util = "0.1" paste = "1.0.12" pin-project = "1.0" -prost = "0.13" +prost = "0.14" tokio = {version = "1.0", features = ["macros", "rt-multi-thread", "net"]} tokio-stream = "0.1" tonic = {path = "../../tonic", features = ["gzip", "deflate", "zstd"]} diff --git a/tests/compression/src/bidirectional_stream.rs b/tests/compression/src/bidirectional_stream.rs index 3d7fe46f5..c30d8796d 100644 --- a/tests/compression/src/bidirectional_stream.rs +++ b/tests/compression/src/bidirectional_stream.rs @@ -89,7 +89,7 @@ async fn client_enabled_server_enabled(encoding: CompressionEncoding) { CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); diff --git a/tests/compression/src/client_stream.rs 
b/tests/compression/src/client_stream.rs index fe31aa01c..bb24f3fe0 100644 --- a/tests/compression/src/client_stream.rs +++ b/tests/compression/src/client_stream.rs @@ -161,14 +161,11 @@ async fn client_enabled_server_disabled(encoding: CompressionEncoding) { CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!( status.message(), - format!( - "Content is compressed with `{}` which isn't supported", - expected - ) + format!("Content is compressed with `{expected}` which isn't supported") ); } @@ -218,7 +215,7 @@ async fn compressing_response_from_client_stream(encoding: CompressionEncoding) CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); let bytes_sent = response_bytes_counter.load(SeqCst); diff --git a/tests/compression/src/compressing_request.rs b/tests/compression/src/compressing_request.rs index ed64f4e9a..ff710f038 100644 --- a/tests/compression/src/compressing_request.rs +++ b/tests/compression/src/compressing_request.rs @@ -177,14 +177,11 @@ async fn client_enabled_server_disabled(encoding: CompressionEncoding) { CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!( status.message(), - format!( - "Content is compressed with `{}` which isn't supported", - expected - ) + format!("Content is compressed with `{expected}` which isn't supported") ); assert_eq!( diff --git a/tests/compression/src/compressing_response.rs b/tests/compression/src/compressing_response.rs 
index 28ac95005..85c5a481e 100644 --- a/tests/compression/src/compressing_response.rs +++ b/tests/compression/src/compressing_response.rs @@ -46,7 +46,7 @@ async fn client_enabled_server_enabled(encoding: CompressionEncoding) { .unwrap() .to_str() .unwrap(), - format!("{},identity", expected) + format!("{expected},identity") ); self.service.call(req) } @@ -88,7 +88,7 @@ async fn client_enabled_server_enabled(encoding: CompressionEncoding) { CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; for _ in 0..3 { @@ -353,7 +353,7 @@ async fn disabling_compression_on_single_response(encoding: CompressionEncoding) CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); @@ -411,7 +411,7 @@ async fn disabling_compression_on_response_but_keeping_compression_on_stream( CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); @@ -482,7 +482,7 @@ async fn disabling_compression_on_response_from_client_stream(encoding: Compress CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); let bytes_sent = response_bytes_counter.load(SeqCst); diff --git a/tests/compression/src/server_stream.rs 
b/tests/compression/src/server_stream.rs index 775149ac2..7a6e1dffe 100644 --- a/tests/compression/src/server_stream.rs +++ b/tests/compression/src/server_stream.rs @@ -47,7 +47,7 @@ async fn client_enabled_server_enabled(encoding: CompressionEncoding) { CompressionEncoding::Gzip => "gzip", CompressionEncoding::Zstd => "zstd", CompressionEncoding::Deflate => "deflate", - _ => panic!("unexpected encoding {:?}", encoding), + _ => panic!("unexpected encoding {encoding:?}"), }; assert_eq!(res.metadata().get("grpc-encoding").unwrap(), expected); diff --git a/tests/default_stubs/Cargo.toml b/tests/default_stubs/Cargo.toml index 45f1e74d1..da878513c 100644 --- a/tests/default_stubs/Cargo.toml +++ b/tests/default_stubs/Cargo.toml @@ -7,12 +7,10 @@ name = "default_stubs" [dependencies] tokio = {version = "1.0", features = ["macros", "rt-multi-thread", "net"]} tokio-stream = {version = "0.1", features = ["net"]} -prost = "0.13" -rand = "0.9" tonic = {path = "../../tonic"} +[dev-dependencies] +tempfile = "3.20" + [build-dependencies] tonic-build = {path = "../../tonic-build" } - -[package.metadata.cargo-machete] -ignored = ["prost"] diff --git a/tests/default_stubs/src/lib.rs b/tests/default_stubs/src/lib.rs index bbf98d4ae..1b9fa78a9 100644 --- a/tests/default_stubs/src/lib.rs +++ b/tests/default_stubs/src/lib.rs @@ -1,16 +1,12 @@ -#![allow(unused_imports)] - -mod test_defaults; - use std::pin::Pin; -use tokio_stream::{Stream, StreamExt}; +use tokio_stream::Stream; use tonic::{Request, Response, Status, Streaming}; tonic::include_proto!("test"); tonic::include_proto!("test_default"); #[derive(Debug, Default)] -struct Svc; +pub struct Svc; #[tonic::async_trait] impl test_server::Test for Svc { diff --git a/tests/default_stubs/src/test_defaults.rs b/tests/default_stubs/tests/default.rs similarity index 88% rename from tests/default_stubs/src/test_defaults.rs rename to tests/default_stubs/tests/default.rs index 01d5e358a..ed9c3c81d 100644 --- 
a/tests/default_stubs/src/test_defaults.rs +++ b/tests/default_stubs/tests/default.rs @@ -1,21 +1,15 @@ -#![allow(unused_imports)] - -use crate::test_client::TestClient; -use crate::*; -use rand::Rng as _; -use std::env; -use std::fs; +use default_stubs::test_client::TestClient; +use default_stubs::*; use std::net::SocketAddr; use tokio::net::TcpListener; +use tokio_stream::{Stream, StreamExt}; use tonic::transport::Channel; use tonic::transport::Server; -#[cfg(test)] fn echo_requests_iter() -> impl Stream { tokio_stream::iter(1..usize::MAX).map(|_| ()) } -#[cfg(test)] async fn test_default_stubs( mut client: TestClient, mut client_default_stubs: TestClient, @@ -100,7 +94,6 @@ async fn test_default_stubs_uds() { test_default_stubs(client, client_default_stubs).await; } -#[cfg(test)] async fn run_services_in_background() -> (SocketAddr, SocketAddr) { let svc = test_server::TestServer::new(Svc {}); let svc_default_stubs = test_default_server::TestDefaultServer::new(Svc {}); @@ -132,29 +125,26 @@ async fn run_services_in_background() -> (SocketAddr, SocketAddr) { (addr, addr_default_stubs) } -#[cfg(all(test, not(target_os = "windows")))] +#[cfg(not(target_os = "windows"))] async fn run_services_in_background_uds() -> (String, String) { use tokio::net::UnixListener; let svc = test_server::TestServer::new(Svc {}); let svc_default_stubs = test_default_server::TestDefaultServer::new(Svc {}); - let mut rng = rand::rng(); - let suffix: String = (0..8) - .map(|_| rng.sample(rand::distr::Alphanumeric) as char) - .collect(); - let tmpdir = fs::canonicalize(env::temp_dir()) + let tmpdir = tempfile::Builder::new() + .prefix("tonic-test-") + .tempdir() .unwrap() - .join(format!("tonic_test_{}", suffix)); - fs::create_dir(&tmpdir).unwrap(); + .keep(); let uds_filepath = tmpdir.join("impl.sock").to_str().unwrap().to_string(); let listener = UnixListener::bind(uds_filepath.as_str()).unwrap(); - let uds_addr = format!("unix://{}", uds_filepath); + let uds_addr = 
format!("unix://{uds_filepath}"); let uds_default_stubs_filepath = tmpdir.join("stub.sock").to_str().unwrap().to_string(); let listener_default_stubs = UnixListener::bind(uds_default_stubs_filepath.as_str()).unwrap(); - let uds_default_stubs_addr = format!("unix://{}", uds_default_stubs_filepath); + let uds_default_stubs_addr = format!("unix://{uds_default_stubs_filepath}"); tokio::spawn(async move { Server::builder() diff --git a/tests/deprecated_methods/Cargo.toml b/tests/deprecated_methods/Cargo.toml index 86a530e0b..720069330 100644 --- a/tests/deprecated_methods/Cargo.toml +++ b/tests/deprecated_methods/Cargo.toml @@ -6,9 +6,8 @@ license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = { path = "../../tonic" } [build-dependencies] -prost-build = "0.13" tonic-build = { path = "../../tonic-build" } diff --git a/tests/disable_comments/Cargo.toml b/tests/disable_comments/Cargo.toml index 00339be4b..582f9626f 100644 --- a/tests/disable_comments/Cargo.toml +++ b/tests/disable_comments/Cargo.toml @@ -7,9 +7,8 @@ name = "disable-comments" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = { path = "../../tonic" } [build-dependencies] -prost-build = "0.13" tonic-build = { path = "../../tonic-build" } diff --git a/tests/disable_comments/build.rs b/tests/disable_comments/build.rs index 941f21a9c..6fcdc0afd 100644 --- a/tests/disable_comments/build.rs +++ b/tests/disable_comments/build.rs @@ -1,5 +1,5 @@ fn main() { - let mut config = prost_build::Config::default(); + let mut config = tonic_build::Config::default(); config.disable_comments(["test.Input1", "test.Output1"]); tonic_build::configure() .disable_comments("test.Service1") diff --git a/tests/extern_path/my_application/Cargo.toml b/tests/extern_path/my_application/Cargo.toml index b3ce30e8f..a63cf16d8 
100644 --- a/tests/extern_path/my_application/Cargo.toml +++ b/tests/extern_path/my_application/Cargo.toml @@ -7,7 +7,7 @@ name = "my_application" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../../tonic"} uuid = {package = "uuid1", path = "../uuid"} diff --git a/tests/extern_path/uuid/Cargo.toml b/tests/extern_path/uuid/Cargo.toml index e8561af95..449919cc4 100644 --- a/tests/extern_path/uuid/Cargo.toml +++ b/tests/extern_path/uuid/Cargo.toml @@ -7,6 +7,6 @@ name = "uuid1" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" [build-dependencies] -prost-build = "0.13" +prost-build = "0.14" diff --git a/tests/included_service/Cargo.toml b/tests/included_service/Cargo.toml index 5d69bd3a8..4b2bc3aa6 100644 --- a/tests/included_service/Cargo.toml +++ b/tests/included_service/Cargo.toml @@ -7,7 +7,7 @@ name = "included_service" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/integration_tests/Cargo.toml b/tests/integration_tests/Cargo.toml index 166f52df8..e642bb1e4 100644 --- a/tests/integration_tests/Cargo.toml +++ b/tests/integration_tests/Cargo.toml @@ -8,13 +8,12 @@ name = "integration-tests" [dependencies] bytes = "1.0" -prost = "0.13" +prost = "0.14" tokio = {version = "1.0", features = ["macros", "rt-multi-thread", "net", "sync"]} tonic = {path = "../../tonic"} tracing-subscriber = {version = "0.3"} [dev-dependencies] -async-stream = "0.3" http = "1" http-body = "1" hyper-util = "0.1" @@ -23,7 +22,6 @@ tokio-stream = {version = "0.1.5", features = ["net"]} tower = "0.5" tower-http = { version = "0.6", features = ["set-header", "trace"] } tower-service = "0.3" -tracing = "0.1" 
[build-dependencies] tonic-build = {path = "../../tonic-build"} diff --git a/tests/integration_tests/tests/interceptor.rs b/tests/integration_tests/tests/interceptor.rs index aec9c35fe..0e10129a4 100644 --- a/tests/integration_tests/tests/interceptor.rs +++ b/tests/integration_tests/tests/interceptor.rs @@ -41,7 +41,7 @@ async fn interceptor_retrieves_grpc_method() { .connect_lazy(); fn client_intercept(req: Request<()>) -> Result, Status> { - println!("Intercepting client request: {:?}", req); + println!("Intercepting client request: {req:?}"); let gm = req.extensions().get::().unwrap(); assert_eq!(gm.service(), "test.Test"); diff --git a/tests/integration_tests/tests/load_shed.rs b/tests/integration_tests/tests/load_shed.rs new file mode 100644 index 000000000..746d6a1be --- /dev/null +++ b/tests/integration_tests/tests/load_shed.rs @@ -0,0 +1,61 @@ +use integration_tests::pb::{test_client, test_server, Input, Output}; +use std::net::SocketAddr; +use tokio::net::TcpListener; +use tonic::{transport::Server, Code, Request, Response, Status}; + +#[tokio::test] +async fn service_resource_exhausted() { + let addr = run_service_in_background(0).await; + + let mut client = test_client::TestClient::connect(format!("http://{}", addr)) + .await + .unwrap(); + + let req = Request::new(Input {}); + let res = client.unary_call(req).await; + + let err = res.unwrap_err(); + assert_eq!(err.code(), Code::ResourceExhausted); +} + +#[tokio::test] +async fn service_resource_not_exhausted() { + let addr = run_service_in_background(1).await; + + let mut client = test_client::TestClient::connect(format!("http://{}", addr)) + .await + .unwrap(); + + let req = Request::new(Input {}); + let res = client.unary_call(req).await; + + assert!(res.is_ok()); +} + +async fn run_service_in_background(concurrency_limit: usize) -> SocketAddr { + struct Svc; + + #[tonic::async_trait] + impl test_server::Test for Svc { + async fn unary_call(&self, _req: Request) -> Result, Status> { + 
Ok(Response::new(Output {})) + } + } + + let svc = test_server::TestServer::new(Svc {}); + + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + + tokio::spawn(async move { + Server::builder() + .concurrency_limit_per_connection(concurrency_limit) + .load_shed(true) + .add_service(svc) + .serve_with_incoming(tokio_stream::wrappers::TcpListenerStream::new(listener)) + .await + .unwrap(); + }); + + addr +} diff --git a/tests/integration_tests/tests/max_message_size.rs b/tests/integration_tests/tests/max_message_size.rs index 0de102066..de6c91f90 100644 --- a/tests/integration_tests/tests/max_message_size.rs +++ b/tests/integration_tests/tests/max_message_size.rs @@ -245,14 +245,11 @@ fn assert_test_case(case: TestCase) { (Some(_), Ok(())) => panic!("Expected failure, but got success"), (Some(code), Err(status)) => { if status.code() != code { - panic!( - "Expected failure, got failure but wrong code, got: {:?}", - status - ) + panic!("Expected failure, got failure but wrong code, got: {status:?}") } } - (None, Err(status)) => panic!("Expected success, but got failure, got: {:?}", status), + (None, Err(status)) => panic!("Expected success, but got failure, got: {status:?}"), _ => (), } diff --git a/tests/integration_tests/tests/timeout.rs b/tests/integration_tests/tests/timeout.rs index 450a67d21..6fb1b885b 100644 --- a/tests/integration_tests/tests/timeout.rs +++ b/tests/integration_tests/tests/timeout.rs @@ -7,7 +7,7 @@ use tonic::{transport::Server, Code, Request, Response, Status}; async fn cancelation_on_timeout() { let addr = run_service_in_background(Duration::from_secs(1), Duration::from_secs(100)).await; - let mut client = test_client::TestClient::connect(format!("http://{}", addr)) + let mut client = test_client::TestClient::connect(format!("http://{addr}")) .await .unwrap(); @@ -27,7 +27,7 @@ async fn cancelation_on_timeout() { async fn picks_server_timeout_if_thats_sorter() { let addr = 
run_service_in_background(Duration::from_secs(1), Duration::from_millis(100)).await; - let mut client = test_client::TestClient::connect(format!("http://{}", addr)) + let mut client = test_client::TestClient::connect(format!("http://{addr}")) .await .unwrap(); @@ -46,7 +46,7 @@ async fn picks_server_timeout_if_thats_sorter() { async fn picks_client_timeout_if_thats_sorter() { let addr = run_service_in_background(Duration::from_secs(1), Duration::from_secs(100)).await; - let mut client = test_client::TestClient::connect(format!("http://{}", addr)) + let mut client = test_client::TestClient::connect(format!("http://{addr}")) .await .unwrap(); diff --git a/tests/root-crate-path/Cargo.toml b/tests/root-crate-path/Cargo.toml index ab0b2df1e..10206644d 100644 --- a/tests/root-crate-path/Cargo.toml +++ b/tests/root-crate-path/Cargo.toml @@ -5,7 +5,7 @@ license = "MIT" name = "root-crate-path" [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/same_name/Cargo.toml b/tests/same_name/Cargo.toml index 9929d4fed..9841f6625 100644 --- a/tests/same_name/Cargo.toml +++ b/tests/same_name/Cargo.toml @@ -7,7 +7,7 @@ name = "same_name" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/service_named_result/Cargo.toml b/tests/service_named_result/Cargo.toml index 73bb95ce8..5d8010a42 100644 --- a/tests/service_named_result/Cargo.toml +++ b/tests/service_named_result/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/service_named_service/Cargo.toml b/tests/service_named_service/Cargo.toml index 331b34aad..373059896 100644 --- 
a/tests/service_named_service/Cargo.toml +++ b/tests/service_named_service/Cargo.toml @@ -7,7 +7,7 @@ name = "service_named_service" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tests/skip_debug/Cargo.toml b/tests/skip_debug/Cargo.toml index e1a2a20da..70884156a 100644 --- a/tests/skip_debug/Cargo.toml +++ b/tests/skip_debug/Cargo.toml @@ -7,8 +7,11 @@ name = "skip_debug" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = { path = "../../tonic" } +[dev-dependencies] +static_assertions = "1" + [build-dependencies] tonic-build = { path = "../../tonic-build" } diff --git a/tests/skip_debug/src/lib.rs b/tests/skip_debug/src/lib.rs index 46908f15d..79e2e11ca 100644 --- a/tests/skip_debug/src/lib.rs +++ b/tests/skip_debug/src/lib.rs @@ -1,11 +1,6 @@ pub mod pb { tonic::include_proto!("test"); - - // Add a dummy impl Debug to the skipped debug implementations to avoid - // missing impl Debug errors and check debug is not implemented for Output. 
- impl std::fmt::Debug for Output { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Output").finish() - } - } } + +#[cfg(test)] +static_assertions::assert_not_impl_all!(pb::Output: std::fmt::Debug); diff --git a/tests/skip_debug/tests/skip_debug.rs b/tests/skip_debug/tests/skip_debug.rs deleted file mode 100644 index a29d998d7..000000000 --- a/tests/skip_debug/tests/skip_debug.rs +++ /dev/null @@ -1,8 +0,0 @@ -use std::{fs, path::PathBuf}; - -#[test] -fn skip_debug() { - let path = PathBuf::from(std::env::var("OUT_DIR").unwrap()).join("test.rs"); - let s = fs::read_to_string(path).unwrap(); - assert!(s.contains("#[prost(skip_debug)]\npub struct Output {}")); -} diff --git a/tests/stream_conflict/Cargo.toml b/tests/stream_conflict/Cargo.toml index 20ef202c8..acb208ebc 100644 --- a/tests/stream_conflict/Cargo.toml +++ b/tests/stream_conflict/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" +prost = "0.14" tonic = { path = "../../tonic" } [build-dependencies] diff --git a/tests/use_arc_self/Cargo.toml b/tests/use_arc_self/Cargo.toml index 5bdd492f7..fd7998602 100644 --- a/tests/use_arc_self/Cargo.toml +++ b/tests/use_arc_self/Cargo.toml @@ -5,12 +5,8 @@ license = "MIT" name = "use_arc_self" [dependencies] -tokio-stream = "0.1" -prost = "0.13" +prost = "0.14" tonic = {path = "../../tonic", features = ["gzip"]} [build-dependencies] tonic-build = {path = "../../tonic-build" } - -[package.metadata.cargo-machete] -ignored = ["prost"] diff --git a/tests/use_arc_self/src/lib.rs b/tests/use_arc_self/src/lib.rs index 6b12b588e..b2050ee2d 100644 --- a/tests/use_arc_self/src/lib.rs +++ b/tests/use_arc_self/src/lib.rs @@ -1,7 +1,4 @@ -#![allow(unused_imports)] - use std::sync::Arc; -use tokio_stream::{Stream, StreamExt}; use tonic::{Request, Response, Status}; tonic::include_proto!("test"); diff --git 
a/tests/web/Cargo.toml b/tests/web/Cargo.toml index b1e09d612..cd449258a 100644 --- a/tests/web/Cargo.toml +++ b/tests/web/Cargo.toml @@ -7,11 +7,10 @@ license = "MIT" [dependencies] base64 = "0.22" bytes = "1.0" -http-body = "1" http-body-util = "0.1" hyper = "1" hyper-util = "0.1" -prost = "0.13" +prost = "0.14" tokio = { version = "1", features = ["macros", "rt", "net"] } tokio-stream = { version = "0.1", features = ["net"] } tonic = { path = "../../tonic" } diff --git a/tests/web/build.rs b/tests/web/build.rs index 1abe19299..0a7bed094 100644 --- a/tests/web/build.rs +++ b/tests/web/build.rs @@ -7,5 +7,5 @@ fn main() { protos .iter() - .for_each(|file| println!("cargo:rerun-if-changed={}", file)); + .for_each(|file| println!("cargo:rerun-if-changed={file}")); } diff --git a/tests/web/tests/grpc_web.rs b/tests/web/tests/grpc_web.rs index c0c3c3fdd..32e0b4737 100644 --- a/tests/web/tests/grpc_web.rs +++ b/tests/web/tests/grpc_web.rs @@ -120,14 +120,14 @@ fn build_request(base_uri: String, content_type: &str, accept: &str) -> Request< "grpc-web-text" => test_web::util::base64::STANDARD .encode(encode_body()) .into(), - _ => panic!("invalid content type {}", content_type), + _ => panic!("invalid content type {content_type}"), }; Request::builder() .method(Method::POST) - .header(CONTENT_TYPE, format!("application/{}", content_type)) + .header(CONTENT_TYPE, format!("application/{content_type}")) .header(ORIGIN, "http://example.com") - .header(ACCEPT, format!("application/{}", accept)) + .header(ACCEPT, format!("application/{accept}")) .uri(request_uri) .body(Body::new( Full::new(bytes).map_err(|err| Status::internal(err.to_string())), diff --git a/tests/wellknown-compiled/Cargo.toml b/tests/wellknown-compiled/Cargo.toml index 247dc097c..aad64cb0f 100644 --- a/tests/wellknown-compiled/Cargo.toml +++ b/tests/wellknown-compiled/Cargo.toml @@ -10,9 +10,8 @@ name = "wellknown-compiled" doctest = false [dependencies] -prost = "0.13" +prost = "0.14" tonic = {path = 
"../../tonic"} [build-dependencies] -prost-build = "0.13" tonic-build = {path = "../../tonic-build"} diff --git a/tests/wellknown-compiled/build.rs b/tests/wellknown-compiled/build.rs index 3aadd48cc..b348b8805 100644 --- a/tests/wellknown-compiled/build.rs +++ b/tests/wellknown-compiled/build.rs @@ -1,13 +1,7 @@ fn main() { - let mut config = prost_build::Config::new(); - config.extern_path(".google.protobuf.Empty", "()"); - tonic_build::configure() + .extern_path(".google.protobuf.Empty", "()") .compile_well_known_types(true) - .compile_protos_with_config( - config, - &["proto/google.proto", "proto/test.proto"], - &["proto"], - ) + .compile_protos(&["proto/google.proto", "proto/test.proto"], &["proto"]) .unwrap(); } diff --git a/tests/wellknown/Cargo.toml b/tests/wellknown/Cargo.toml index 3bf0edfdb..8c72f2716 100644 --- a/tests/wellknown/Cargo.toml +++ b/tests/wellknown/Cargo.toml @@ -7,8 +7,8 @@ name = "wellknown" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.13" -prost-types = "0.13" +prost = "0.14" +prost-types = "0.14" tonic = {path = "../../tonic"} [build-dependencies] diff --git a/tonic-build/Cargo.toml b/tonic-build/Cargo.toml index e47b3307e..bb2e7ef01 100644 --- a/tonic-build/Cargo.toml +++ b/tonic-build/Cargo.toml @@ -11,14 +11,14 @@ license = "MIT" name = "tonic-build" readme = "README.md" repository = "https://github.com/hyperium/tonic" -version = "0.13.0" +version = "0.14.0" rust-version = { workspace = true } [dependencies] prettyplease = { version = "0.2" } proc-macro2 = "1.0" -prost-build = { version = "0.13", optional = true } -prost-types = { version = "0.13", optional = true } +prost-build = { version = "0.14", optional = true } +prost-types = { version = "0.14", optional = true } quote = "1.0" syn = "2.0" diff --git a/tonic-build/LICENSE b/tonic-build/LICENSE deleted file mode 100644 index 307709840..000000000 --- a/tonic-build/LICENSE +++ 
/dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/tonic-build/LICENSE b/tonic-build/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic-build/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic-build/README.md b/tonic-build/README.md index 462d4d515..ab67deccc 100644 --- a/tonic-build/README.md +++ b/tonic-build/README.md @@ -2,6 +2,15 @@ Compiles proto files via prost and generates service stubs and proto definitions for use with tonic. +# Feature flags + +- `cleanup-markdown`: Enables cleaning up documentation from the generated code. + Useful when documentation of the generated code fails `cargo test --doc` for example. + The `prost` feature must be enabled to use this feature. +- `prost`: Enables usage of prost generator (enabled by default). +- `transport`: Enables generation of `connect` method using `tonic::transport::Channel` + (enabled by default). 
+ ## Features Required dependencies @@ -15,21 +24,22 @@ prost = "" tonic-build = "" ``` -## Examples +## Getting Started -### Simple +`tonic-build` works by being included as a [`build.rs` file](https://doc.rust-lang.org/cargo/reference/build-scripts.html) at the root of the binary/library. -In `build.rs`: -```rust +You can rely on the defaults via + +```rust,no_run fn main() -> Result<(), Box> { tonic_build::compile_protos("proto/service.proto")?; Ok(()) } ``` -### Configuration +Or configure the generated code deeper via -```rust +```rust,no_run fn main() -> Result<(), Box> { tonic_build::configure() .build_server(false) @@ -40,20 +50,37 @@ fn main() -> Result<(), Box> { Ok(()) } ``` -See [more examples here](https://github.com/hyperium/tonic/tree/master/examples) + +For further details how to use the generated client/server, see the [examples here](https://github.com/hyperium/tonic/tree/master/examples) or the Google APIs example below. + + +## NixOS related hints + +On NixOS, it is better to specify the location of `PROTOC` and `PROTOC_INCLUDE` explicitly. + +```bash +$ export PROTOBUF_LOCATION=$(nix-env -q protobuf --out-path --no-name) +$ export PROTOC=$PROTOBUF_LOCATION/bin/protoc +$ export PROTOC_INCLUDE=$PROTOBUF_LOCATION/include +$ cargo build +``` + +The reason being that if `prost_build::compile_protos` fails to generate the resultant package, +the failure is not obvious until the `include!(concat!(env!("OUT_DIR"), "/resultant.rs"));` +fails with `No such file or directory` error. ### Google APIs example A good way to use Google API is probably using git submodules. 
So suppose in our `proto` folder we do: -``` +```bash git submodule add https://github.com/googleapis/googleapis git submodule update --remote ``` And a bunch of Google proto files in structure will be like this: -``` +```raw ├── googleapis │   └── google │   ├── api @@ -68,21 +95,23 @@ And a bunch of Google proto files in structure will be like this: │   └── schema.proto ``` -Then we can generate Rust code via this setup in our `build.rs` -```rust -fn main() { +Then we can generate Rust code via this setup in our `build.rs`: + +```rust,no_run +fn main() -> Result<(), Box> { tonic_build::configure() .build_server(false) //.out_dir("src/google") // you can change the generated code's location .compile_protos( &["proto/googleapis/google/pubsub/v1/pubsub.proto"], &["proto/googleapis"], // specify the root location to search proto dependencies - ).unwrap(); + )?; + Ok(()) } ``` Then you can reference the generated Rust like this this in your code: -```rust +```rust,ignore pub mod api { tonic::include_proto!("google.pubsub.v1"); } @@ -92,7 +121,7 @@ use api::{publisher_client::PublisherClient, ListTopicsRequest}; Or if you want to save the generated code in your own code base, you can uncomment the line `.out_dir(...)` above, and in your lib file config a mod like this: -```rust +```rust,ignore pub mod google { #[path = ""] pub mod pubsub { diff --git a/tonic-build/src/lib.rs b/tonic-build/src/lib.rs index 2ba5b08ce..fef48637c 100644 --- a/tonic-build/src/lib.rs +++ b/tonic-build/src/lib.rs @@ -1,65 +1,4 @@ -//! `tonic-build` compiles `proto` files via `prost` and generates service stubs -//! and proto definitions for use with `tonic`. -//! -//! # Feature flags -//! -//! - `cleanup-markdown`: Enables cleaning up documentation from the generated code. Useful -//! when documentation of the generated code fails `cargo test --doc` for example. The -//! `prost` feature must be enabled to use this feature. -//! 
- `prost`: Enables usage of prost generator (enabled by default). -//! - `transport`: Enables generation of `connect` method using `tonic::transport::Channel` -//! (enabled by default). -//! -//! # Required dependencies -//! -//! ```toml -//! [dependencies] -//! tonic = -//! prost = -//! -//! [build-dependencies] -//! tonic-build = -//! ``` -//! -//! # Examples -//! Simple -//! -//! ```rust,no_run -//! fn main() -> Result<(), Box> { -//! tonic_build::compile_protos("proto/service.proto")?; -//! Ok(()) -//! } -//! ``` -//! -//! Configuration -//! -//! ```rust,no_run -//! fn main() -> Result<(), Box> { -//! tonic_build::configure() -//! .build_server(false) -//! .compile_protos( -//! &["proto/helloworld/helloworld.proto"], -//! &["proto/helloworld"], -//! )?; -//! Ok(()) -//! } -//!``` -//! -//! ## NixOS related hints -//! -//! On NixOS, it is better to specify the location of `PROTOC` and `PROTOC_INCLUDE` explicitly. -//! -//! ```bash -//! $ export PROTOBUF_LOCATION=$(nix-env -q protobuf --out-path --no-name) -//! $ export PROTOC=$PROTOBUF_LOCATION/bin/protoc -//! $ export PROTOC_INCLUDE=$PROTOBUF_LOCATION/include -//! $ cargo build -//! ``` -//! -//! The reason being that if `prost_build::compile_protos` fails to generate the resultant package, -//! the failure is not obvious until the `include!(concat!(env!("OUT_DIR"), "/resultant.rs"));` -//! fails with `No such file or directory` error. 
- +#![doc = include_str!("../README.md")] #![recursion_limit = "256"] #![doc( html_logo_url = "https://raw.githubusercontent.com/tokio-rs/website/master/public/img/icons/tonic.svg" @@ -85,9 +24,9 @@ pub use prost::{compile_fds, compile_protos, configure, Builder}; pub mod manual; /// Service code generation for client -pub mod client; +mod client; /// Service code generation for Server -pub mod server; +mod server; mod code_gen; pub use code_gen::CodeGenBuilder; @@ -234,7 +173,7 @@ fn generate_attributes<'a>( .filter(|(matcher, _)| match_name(matcher, name)) .flat_map(|(_, attr)| { // attributes cannot be parsed directly, so we pretend they're on a struct - syn::parse_str::(&format!("{}\nstruct fake;", attr)) + syn::parse_str::(&format!("{attr}\nstruct fake;")) .unwrap() .attrs }) @@ -259,7 +198,7 @@ fn generate_doc_comment>(comment: S) -> TokenStream { let comment = comment.as_ref(); let comment = if !comment.starts_with(' ') { - format!(" {}", comment) + format!(" {comment}") } else { comment.to_string() }; diff --git a/tonic-build/src/prost.rs b/tonic-build/src/prost.rs index 6bf3847b2..f89d00cd9 100644 --- a/tonic-build/src/prost.rs +++ b/tonic-build/src/prost.rs @@ -180,7 +180,7 @@ impl crate::Method for TonicBuildMethod { .unwrap() .to_token_stream() } else { - syn::parse_str::(&format!("{}::{}", proto_path, rust_type)) + syn::parse_str::(&format!("{proto_path}::{rust_type}")) .unwrap() .to_token_stream() } diff --git a/tonic-health/Cargo.toml b/tonic-health/Cargo.toml index fe6fd2a52..3f39a782c 100644 --- a/tonic-health/Cargo.toml +++ b/tonic-health/Cargo.toml @@ -11,18 +11,18 @@ license = "MIT" name = "tonic-health" readme = "README.md" repository = "https://github.com/hyperium/tonic" -version = "0.13.0" +version = "0.14.0" rust-version = { workspace = true } [dependencies] -prost = "0.13" +prost = "0.14" tokio = {version = "1.0", features = ["sync"]} tokio-stream = {version = "0.1", default-features = false, features = ["sync"]} -tonic 
= { version = "0.13.0", path = "../tonic", default-features = false, features = ["codegen", "prost"] } +tonic = { version = "0.14.0", path = "../tonic", default-features = false, features = ["codegen", "prost"] } [dev-dependencies] tokio = {version = "1.0", features = ["rt-multi-thread", "macros"]} -prost-types = "0.13.0" +prost-types = "0.14.0" [lints] workspace = true diff --git a/tonic-health/LICENSE b/tonic-health/LICENSE deleted file mode 100644 index 307709840..000000000 --- a/tonic-health/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/tonic-health/LICENSE b/tonic-health/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic-health/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic-health/src/generated/grpc_health_v1.rs b/tonic-health/src/generated/grpc_health_v1.rs index 67ec57c98..1d4b99d3f 100644 --- a/tonic-health/src/generated/grpc_health_v1.rs +++ b/tonic-health/src/generated/grpc_health_v1.rs @@ -1,10 +1,10 @@ // This file is @generated by prost-build. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct HealthCheckRequest { #[prost(string, tag = "1")] pub service: ::prost::alloc::string::String, } -#[derive(Clone, Copy, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct HealthCheckResponse { #[prost(enumeration = "health_check_response::ServingStatus", tag = "1")] pub status: i32, diff --git a/tonic-health/src/server.rs b/tonic-health/src/server.rs index a512b60fd..1a4d73e7f 100644 --- a/tonic-health/src/server.rs +++ b/tonic-health/src/server.rs @@ -37,7 +37,8 @@ pub struct HealthReporter { } impl HealthReporter { - fn new() -> Self { + /// Create a new HealthReporter with an initial service (named ""), corresponding to overall server health + pub fn new() -> Self { // According to the gRPC Health Check specification, the empty service "" corresponds to the overall server health let server_status = ("".to_string(), watch::channel(ServingStatus::Serving)); @@ -97,6 +98,12 @@ impl HealthReporter { } } +impl Default for HealthReporter { + fn default() -> Self { + Self::new() + } +} + /// A service providing implementations of gRPC health checking protocol. 
#[derive(Debug)] pub struct HealthService { @@ -108,6 +115,11 @@ impl HealthService { HealthService { statuses: services } } + /// Create a HealthService, carrying across the statuses from an existing HealthReporter + pub fn from_health_reporter(health_reporter: HealthReporter) -> Self { + Self::new(health_reporter.statuses) + } + async fn service_health(&self, service_name: &str) -> Option { let reader = self.statuses.read().await; reader.get(service_name).map(|p| *p.1.borrow()) diff --git a/tonic-reflection/Cargo.toml b/tonic-reflection/Cargo.toml index 7686c1e70..4d8e0f10d 100644 --- a/tonic-reflection/Cargo.toml +++ b/tonic-reflection/Cargo.toml @@ -14,7 +14,7 @@ license = "MIT" name = "tonic-reflection" readme = "README.md" repository = "https://github.com/hyperium/tonic" -version = "0.13.0" +version = "0.14.0" rust-version = { workspace = true } [package.metadata.docs.rs] @@ -25,15 +25,15 @@ server = ["dep:prost-types", "dep:tokio", "dep:tokio-stream"] default = ["server"] [dependencies] -prost = "0.13" -prost-types = {version = "0.13", optional = true} +prost = "0.14" +prost-types = {version = "0.14", optional = true} tokio = { version = "1.0", features = ["sync", "rt"], optional = true } tokio-stream = {version = "0.1", default-features = false, optional = true } -tonic = { version = "0.13.0", path = "../tonic", default-features = false, features = ["codegen", "prost"] } +tonic = { version = "0.14.0", path = "../tonic", default-features = false, features = ["codegen", "prost"] } [dev-dependencies] tokio-stream = {version = "0.1", default-features = false, features = ["net"]} -tonic = { version = "0.13.0", path = "../tonic", default-features = false, features = ["transport"] } +tonic = { version = "0.14.0", path = "../tonic", default-features = false, features = ["transport"] } [lints] workspace = true diff --git a/tonic-reflection/LICENSE b/tonic-reflection/LICENSE deleted file mode 100644 index 307709840..000000000 --- 
a/tonic-reflection/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/tonic-reflection/LICENSE b/tonic-reflection/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic-reflection/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic-reflection/src/generated/grpc_reflection_v1.rs b/tonic-reflection/src/generated/grpc_reflection_v1.rs index 569ee6715..cb56c7ae7 100644 --- a/tonic-reflection/src/generated/grpc_reflection_v1.rs +++ b/tonic-reflection/src/generated/grpc_reflection_v1.rs @@ -1,6 +1,6 @@ // This file is @generated by prost-build. /// The message sent by the client when calling ServerReflectionInfo method. 
-#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ServerReflectionRequest { #[prost(string, tag = "1")] pub host: ::prost::alloc::string::String, @@ -17,7 +17,7 @@ pub mod server_reflection_request { /// To use reflection service, the client should set one of the following /// fields in message_request. The server distinguishes requests by their /// defined field and then handles them using corresponding methods. - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)] pub enum MessageRequest { /// Find a proto file by the file name. #[prost(string, tag = "3")] @@ -49,7 +49,7 @@ pub mod server_reflection_request { } /// The type name and extension number sent by the client when requesting /// file_containing_extension. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExtensionRequest { /// Fully-qualified type name. The format should be . #[prost(string, tag = "1")] @@ -99,7 +99,7 @@ pub mod server_reflection_response { /// Serialized FileDescriptorProto messages sent by the server answering /// a file_by_filename, file_containing_symbol, or file_containing_extension /// request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FileDescriptorResponse { /// Serialized FileDescriptorProto messages. We avoid taking a dependency on /// descriptor.proto, which uses proto2 only features, by making them opaque @@ -109,7 +109,7 @@ pub struct FileDescriptorResponse { } /// A list of extension numbers sent by the server answering /// all_extension_numbers_of_type request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExtensionNumberResponse { /// Full name of the base type, including the package name. The format /// is . 
@@ -128,7 +128,7 @@ pub struct ListServiceResponse { } /// The information of a single service used by ListServiceResponse to answer /// list_services request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ServiceResponse { /// Full name of a registered service, including its package name. The format /// is . @@ -136,7 +136,7 @@ pub struct ServiceResponse { pub name: ::prost::alloc::string::String, } /// The error code and error message sent by the server when an error occurs. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ErrorResponse { /// This field uses the error codes defined in grpc::StatusCode. #[prost(int32, tag = "1")] diff --git a/tonic-reflection/src/generated/grpc_reflection_v1alpha.rs b/tonic-reflection/src/generated/grpc_reflection_v1alpha.rs index 685d9b0a1..cf2b3aa34 100644 --- a/tonic-reflection/src/generated/grpc_reflection_v1alpha.rs +++ b/tonic-reflection/src/generated/grpc_reflection_v1alpha.rs @@ -1,6 +1,6 @@ // This file is @generated by prost-build. /// The message sent by the client when calling ServerReflectionInfo method. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ServerReflectionRequest { #[prost(string, tag = "1")] pub host: ::prost::alloc::string::String, @@ -17,7 +17,7 @@ pub mod server_reflection_request { /// To use reflection service, the client should set one of the following /// fields in message_request. The server distinguishes requests by their /// defined field and then handles them using corresponding methods. - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)] pub enum MessageRequest { /// Find a proto file by the file name. 
#[prost(string, tag = "3")] @@ -49,7 +49,7 @@ pub mod server_reflection_request { } /// The type name and extension number sent by the client when requesting /// file_containing_extension. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExtensionRequest { /// Fully-qualified type name. The format should be . #[prost(string, tag = "1")] @@ -99,7 +99,7 @@ pub mod server_reflection_response { /// Serialized FileDescriptorProto messages sent by the server answering /// a file_by_filename, file_containing_symbol, or file_containing_extension /// request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FileDescriptorResponse { /// Serialized FileDescriptorProto messages. We avoid taking a dependency on /// descriptor.proto, which uses proto2 only features, by making them opaque @@ -109,7 +109,7 @@ pub struct FileDescriptorResponse { } /// A list of extension numbers sent by the server answering /// all_extension_numbers_of_type request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExtensionNumberResponse { /// Full name of the base type, including the package name. The format /// is . @@ -128,7 +128,7 @@ pub struct ListServiceResponse { } /// The information of a single service used by ListServiceResponse to answer /// list_services request. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ServiceResponse { /// Full name of a registered service, including its package name. The format /// is . @@ -136,7 +136,7 @@ pub struct ServiceResponse { pub name: ::prost::alloc::string::String, } /// The error code and error message sent by the server when an error occurs. 
-#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ErrorResponse { /// This field uses the error codes defined in grpc::StatusCode. #[prost(int32, tag = "1")] diff --git a/tonic-reflection/src/server/mod.rs b/tonic-reflection/src/server/mod.rs index 2ce34eadb..2b1a806a9 100644 --- a/tonic-reflection/src/server/mod.rs +++ b/tonic-reflection/src/server/mod.rs @@ -251,7 +251,7 @@ impl ReflectionServiceState { fn symbol_by_name(&self, symbol: &str) -> Result, Status> { match self.symbols.get(symbol) { - None => Err(Status::not_found(format!("symbol '{}' not found", symbol))), + None => Err(Status::not_found(format!("symbol '{symbol}' not found"))), Some(fd) => { let mut encoded_fd = Vec::new(); if fd.clone().encode(&mut encoded_fd).is_err() { @@ -265,7 +265,7 @@ impl ReflectionServiceState { fn file_by_filename(&self, filename: &str) -> Result, Status> { match self.files.get(filename) { - None => Err(Status::not_found(format!("file '{}' not found", filename))), + None => Err(Status::not_found(format!("file '{filename}' not found"))), Some(fd) => { let mut encoded_fd = Vec::new(); if fd.clone().encode(&mut encoded_fd).is_err() { @@ -285,14 +285,13 @@ fn extract_name( ) -> Result { match maybe_name { None => Err(Error::InvalidFileDescriptorSet(format!( - "missing {} name", - name_type + "missing {name_type} name" ))), Some(name) => { if prefix.is_empty() { Ok(name.to_string()) } else { - Ok(format!("{}.{}", prefix, name)) + Ok(format!("{prefix}.{name}")) } } } @@ -320,7 +319,7 @@ impl Display for Error { match self { Error::DecodeError(_) => f.write_str("error decoding FileDescriptorSet from buffer"), Error::InvalidFileDescriptorSet(s) => { - write!(f, "invalid FileDescriptorSet - {}", s) + write!(f, "invalid FileDescriptorSet - {s}") } } } diff --git a/tonic-types/Cargo.toml b/tonic-types/Cargo.toml index 6ea75da69..ce1d6e3c8 100644 --- a/tonic-types/Cargo.toml +++ b/tonic-types/Cargo.toml @@ -14,13 +14,13 
@@ license = "MIT" name = "tonic-types" readme = "README.md" repository = "https://github.com/hyperium/tonic" -version = "0.13.0" +version = "0.14.0" rust-version = { workspace = true } [dependencies] -prost = "0.13" -prost-types = "0.13" -tonic = { version = "0.13.0", path = "../tonic", default-features = false } +prost = "0.14" +prost-types = "0.14" +tonic = { version = "0.14.0", path = "../tonic", default-features = false } [lints] workspace = true diff --git a/tonic-types/LICENSE b/tonic-types/LICENSE deleted file mode 100644 index 307709840..000000000 --- a/tonic-types/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/tonic-types/LICENSE b/tonic-types/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic-types/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic-types/proto/error_details.proto b/tonic-types/proto/error_details.proto index 7bdf0dd39..37bf78398 100644 --- a/tonic-types/proto/error_details.proto +++ b/tonic-types/proto/error_details.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +24,58 @@ option java_outer_classname = "ErrorDetailsProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +message ErrorInfo { + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + string reason = 1; + + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. 
Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + string domain = 2; + + // Additional structured details about this error. + // + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of + // instances that can be created in a single (batch) request. + map metadata = 3; +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. @@ -79,61 +131,75 @@ message QuotaFailure { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". string description = 2; - } - // Describes all quota violations. - repeated Violation violations = 1; -} + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. + // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". 
On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + string api_service = 3; -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// ```json -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// ``` -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// ```json -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -// ``` -message ErrorInfo { - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - string reason = 1; + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + string quota_metric = 4; - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". 
If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - string domain = 2; + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. + // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + string quota_id = 5; - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - map metadata = 3; + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + map quota_dimensions = 6; + + // The enforced quota value at the time of the `QuotaFailure`. + // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. 
+ int64 quota_value = 7; + + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. + optional int64 future_quota_value = 8; + } + + // Describes all quota violations. + repeated Violation violations = 1; } // Describes what preconditions have failed. @@ -170,13 +236,59 @@ message PreconditionFailure { message BadRequest { // A message type used to describe a single bad request field. message FieldViolation { - // A path leading to a field in the request body. The value will be a + // A path that leads to a field in the request body. The value will be a // sequence of dot-separated identifiers that identify a protocol buffer - // field. E.g., "field_violations.field" would identify this field. + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // * `full_name` for a violation in the `full_name` value + // * `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // * `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. 
+ // + // In JSON, the same values are represented as: + // + // * `fullName` for a violation in the `fullName` value + // * `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // * `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. string field = 1; // A description of why the request element is bad. string description = 2; + + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + string reason = 3; + + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage localized_message = 4; } // Describes all violations in a client request. @@ -204,7 +316,8 @@ message ResourceInfo { // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. string resource_name = 2; // The owner of the resource (optional). @@ -241,7 +354,7 @@ message Help { // which can be attached to an RPC error. message LocalizedMessage { // The locale used following the specification defined at - // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. 
// Examples are: "en-US", "fr-CH", "es-MX" string locale = 1; diff --git a/tonic-types/src/generated/google_rpc.rs b/tonic-types/src/generated/google_rpc.rs index 856defa5c..d7d03f725 100644 --- a/tonic-types/src/generated/google_rpc.rs +++ b/tonic-types/src/generated/google_rpc.rs @@ -21,6 +21,66 @@ pub struct Status { #[prost(message, repeated, tag = "3")] pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, } +/// Describes the cause of the error with structured details. +/// +/// Example of an error when contacting the "pubsub.googleapis.com" API when it +/// is not enabled: +/// +/// ```text +/// { "reason": "API_DISABLED" +/// "domain": "googleapis.com" +/// "metadata": { +/// "resource": "projects/123", +/// "service": "pubsub.googleapis.com" +/// } +/// } +/// ``` +/// +/// This response indicates that the pubsub.googleapis.com API is not enabled. +/// +/// Example of an error that is returned when attempting to create a Spanner +/// instance in a region that is out of stock: +/// +/// ```text +/// { "reason": "STOCKOUT" +/// "domain": "spanner.googleapis.com", +/// "metadata": { +/// "availableRegions": "us-central1,us-east2" +/// } +/// } +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ErrorInfo { + /// The reason of the error. This is a constant value that identifies the + /// proximate cause of the error. Error reasons are unique within a particular + /// domain of errors. This should be at most 63 characters and match a + /// regular expression of `[A-Z][A-Z0-9_]+\[A-Z0-9\]`, which represents + /// UPPER_SNAKE_CASE. + #[prost(string, tag = "1")] + pub reason: ::prost::alloc::string::String, + /// The logical grouping to which the "reason" belongs. The error domain + /// is typically the registered service name of the tool or product that + /// generates the error. Example: "pubsub.googleapis.com". 
If the error is + /// generated by some common infrastructure, the error domain must be a + /// globally unique value that identifies the infrastructure. For Google API + /// infrastructure, the error domain is "googleapis.com". + #[prost(string, tag = "2")] + pub domain: ::prost::alloc::string::String, + /// Additional structured details about this error. + /// + /// Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + /// ideally be lowerCamelCase. Also, they must be limited to 64 characters in + /// length. When identifying the current value of an exceeded limit, the units + /// should be contained in the key, not the value. For example, rather than + /// `{"instanceLimit": "100/request"}`, should be returned as, + /// `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of + /// instances that can be created in a single (batch) request. + #[prost(map = "string, string", tag = "3")] + pub metadata: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} /// Describes when the clients can retry a failed request. Clients could ignore /// the recommendation here or retry when this information is missing from error /// responses. @@ -34,14 +94,14 @@ pub struct Status { /// the delay between retries based on `retry_delay`, until either a maximum /// number of retries have been reached or a maximum retry delay cap has been /// reached. -#[derive(Clone, Copy, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct RetryInfo { /// Clients should wait at least this long between retrying the same request. #[prost(message, optional, tag = "1")] pub retry_delay: ::core::option::Option<::prost_types::Duration>, } /// Describes additional debugging info. 
-#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct DebugInfo { /// The stack trace entries indicating where the error occurred. #[prost(string, repeated, tag = "1")] @@ -87,66 +147,76 @@ pub mod quota_failure { /// exceeded". #[prost(string, tag = "2")] pub description: ::prost::alloc::string::String, + /// The API Service from which the `QuotaFailure.Violation` orginates. In + /// some cases, Quota issues originate from an API Service other than the one + /// that was called. In other words, a dependency of the called API Service + /// could be the cause of the `QuotaFailure`, and this field would have the + /// dependency API service name. + /// + /// For example, if the called API is Kubernetes Engine API + /// (container.googleapis.com), and a quota violation occurs in the + /// Kubernetes Engine API itself, this field would be + /// "container.googleapis.com". On the other hand, if the quota violation + /// occurs when the Kubernetes Engine API creates VMs in the Compute Engine + /// API (compute.googleapis.com), this field would be + /// "compute.googleapis.com". + #[prost(string, tag = "3")] + pub api_service: ::prost::alloc::string::String, + /// The metric of the violated quota. A quota metric is a named counter to + /// measure usage, such as API requests or CPUs. When an activity occurs in a + /// service, such as Virtual Machine allocation, one or more quota metrics + /// may be affected. + /// + /// For example, "compute.googleapis.com/cpus_per_vm_family", + /// "storage.googleapis.com/internet_egress_bandwidth". + #[prost(string, tag = "4")] + pub quota_metric: ::prost::alloc::string::String, + /// The id of the violated quota. Also know as "limit name", this is the + /// unique identifier of a quota in the context of an API service. + /// + /// For example, "CPUS-PER-VM-FAMILY-per-project-region". 
+ #[prost(string, tag = "5")] + pub quota_id: ::prost::alloc::string::String, + /// The dimensions of the violated quota. Every non-global quota is enforced + /// on a set of dimensions. While quota metric defines what to count, the + /// dimensions specify for what aspects the counter should be increased. + /// + /// For example, the quota "CPUs per region per VM family" enforces a limit + /// on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + /// "region" and "vm_family". And if the violation occurred in region + /// "us-central1" and for VM family "n1", the quota_dimensions would be, + /// + /// { + /// "region": "us-central1", + /// "vm_family": "n1", + /// } + /// + /// When a quota is enforced globally, the quota_dimensions would always be + /// empty. + #[prost(map = "string, string", tag = "6")] + pub quota_dimensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// The enforced quota value at the time of the `QuotaFailure`. + /// + /// For example, if the enforced quota value at the time of the + /// `QuotaFailure` on the number of CPUs is "10", then the value of this + /// field would reflect this quantity. + #[prost(int64, tag = "7")] + pub quota_value: i64, + /// The new quota value being rolled out at the time of the violation. At the + /// completion of the rollout, this value will be enforced in place of + /// quota_value. If no rollout is in progress at the time of the violation, + /// this field is not set. + /// + /// For example, if at the time of the violation a rollout is in progress + /// changing the number of CPUs quota from 10 to 20, 20 would be the value of + /// this field. + #[prost(int64, optional, tag = "8")] + pub future_quota_value: ::core::option::Option, } } -/// Describes the cause of the error with structured details. 
-/// -/// Example of an error when contacting the "pubsub.googleapis.com" API when it -/// is not enabled: -/// -/// ```text,json -/// { "reason": "API_DISABLED" -/// "domain": "googleapis.com" -/// "metadata": { -/// "resource": "projects/123", -/// "service": "pubsub.googleapis.com" -/// } -/// } -/// ``` -/// -/// This response indicates that the pubsub.googleapis.com API is not enabled. -/// -/// Example of an error that is returned when attempting to create a Spanner -/// instance in a region that is out of stock: -/// -/// ```text,json -/// { "reason": "STOCKOUT" -/// "domain": "spanner.googleapis.com", -/// "metadata": { -/// "availableRegions": "us-central1,us-east2" -/// } -/// } -/// ``` -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ErrorInfo { - /// The reason of the error. This is a constant value that identifies the - /// proximate cause of the error. Error reasons are unique within a particular - /// domain of errors. This should be at most 63 characters and match - /// /\[A-Z0-9\_\]+/. - #[prost(string, tag = "1")] - pub reason: ::prost::alloc::string::String, - /// The logical grouping to which the "reason" belongs. The error domain - /// is typically the registered service name of the tool or product that - /// generates the error. Example: "pubsub.googleapis.com". If the error is - /// generated by some common infrastructure, the error domain must be a - /// globally unique value that identifies the infrastructure. For Google API - /// infrastructure, the error domain is "googleapis.com". - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Additional structured details about this error. - /// - /// Keys should match /\[a-zA-Z0-9-\_\]/ and be limited to 64 characters in - /// length. When identifying the current value of an exceeded limit, the units - /// should be contained in the key, not the value. 
For example, rather than - /// {"instanceLimit": "100/request"}, should be returned as, - /// {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - /// instances that can be created in a single (batch) request. - #[prost(map = "string, string", tag = "3")] - pub metadata: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} /// Describes what preconditions have failed. /// /// For example, if an RPC failed because it required the Terms of Service to be @@ -161,7 +231,7 @@ pub struct PreconditionFailure { /// Nested message and enum types in `PreconditionFailure`. pub mod precondition_failure { /// A message type used to describe a single precondition failure. - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Violation { /// The type of PreconditionFailure. We recommend using a service-specific /// enum type to define the supported precondition violation subjects. For @@ -192,21 +262,69 @@ pub struct BadRequest { /// Nested message and enum types in `BadRequest`. pub mod bad_request { /// A message type used to describe a single bad request field. - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FieldViolation { - /// A path leading to a field in the request body. The value will be a + /// A path that leads to a field in the request body. The value will be a /// sequence of dot-separated identifiers that identify a protocol buffer - /// field. E.g., "field_violations.field" would identify this field. + /// field. 
+ /// + /// Consider the following: + /// + /// ```text + /// message CreateContactRequest { + /// message EmailAddress { + /// enum Type { + /// TYPE_UNSPECIFIED = 0; + /// HOME = 1; + /// WORK = 2; + /// } + /// + /// optional string email = 1; + /// repeated EmailType type = 2; + /// } + /// + /// string full_name = 1; + /// repeated EmailAddress email_addresses = 2; + /// } + /// ``` + /// + /// In this example, in proto `field` could take one of the following values: + /// + /// * `full_name` for a violation in the `full_name` value + /// * `email_addresses\[1\].email` for a violation in the `email` field of the + /// first `email_addresses` message + /// * `email_addresses\[3\].type\[2\]` for a violation in the second `type` + /// value in the third `email_addresses` message. + /// + /// In JSON, the same values are represented as: + /// + /// * `fullName` for a violation in the `fullName` value + /// * `emailAddresses\[1\].email` for a violation in the `email` field of the + /// first `emailAddresses` message + /// * `emailAddresses\[3\].type\[2\]` for a violation in the second `type` + /// value in the third `emailAddresses` message. #[prost(string, tag = "1")] pub field: ::prost::alloc::string::String, /// A description of why the request element is bad. #[prost(string, tag = "2")] pub description: ::prost::alloc::string::String, + /// The reason of the field-level error. This is a constant value that + /// identifies the proximate cause of the field-level error. It should + /// uniquely identify the type of the FieldViolation within the scope of the + /// google.rpc.ErrorInfo.domain. This should be at most 63 + /// characters and match a regular expression of `[A-Z][A-Z0-9_]+\[A-Z0-9\]`, + /// which represents UPPER_SNAKE_CASE. + #[prost(string, tag = "3")] + pub reason: ::prost::alloc::string::String, + /// Provides a localized error message for field-level errors that is safe to + /// return to the API consumer. 
+ #[prost(message, optional, tag = "4")] + pub localized_message: ::core::option::Option, } } /// Contains metadata about the request that clients can attach when filing a bug /// or providing other forms of feedback. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct RequestInfo { /// An opaque string that should only be interpreted by the service generating /// it. For example, it can be used to identify requests in the service's logs. @@ -218,7 +336,7 @@ pub struct RequestInfo { pub serving_data: ::prost::alloc::string::String, } /// Describes the resource that is being accessed. -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ResourceInfo { /// A name for the type of resource being accessed, e.g. "sql table", /// "cloud storage bucket", "file", "Google calendar"; or the type URL @@ -227,7 +345,8 @@ pub struct ResourceInfo { pub resource_type: ::prost::alloc::string::String, /// The name of the resource being accessed. For example, a shared calendar /// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - /// error is \[google.rpc.Code.PERMISSION_DENIED\]\[google.rpc.Code.PERMISSION_DENIED\]. + /// error is + /// \[google.rpc.Code.PERMISSION_DENIED\]\[google.rpc.Code.PERMISSION_DENIED\]. #[prost(string, tag = "2")] pub resource_name: ::prost::alloc::string::String, /// The owner of the resource (optional). @@ -255,7 +374,7 @@ pub struct Help { /// Nested message and enum types in `Help`. pub mod help { /// Describes a URL link. - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Link { /// Describes what the link offers. #[prost(string, tag = "1")] @@ -267,10 +386,10 @@ pub mod help { } /// Provides a localized error message that is safe to return to the user /// which can be attached to an RPC error. 
-#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct LocalizedMessage { /// The locale used following the specification defined at - /// + /// /// Examples are: "en-US", "fr-CH", "es-MX" #[prost(string, tag = "1")] pub locale: ::prost::alloc::string::String, diff --git a/tonic-types/src/generated/types_fds.rs b/tonic-types/src/generated/types_fds.rs index b16fdcd09..28bef4dcc 100644 --- a/tonic-types/src/generated/types_fds.rs +++ b/tonic-types/src/generated/types_fds.rs @@ -1,33 +1,4 @@ // This file is @generated by codegen. -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,36 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Copyright 2020 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -145,86 +87,123 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 80u8, 66u8, 170u8, 2u8, 30u8, 71u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 80u8, 114u8, 111u8, 116u8, 111u8, 98u8, 117u8, 102u8, 46u8, 87u8, 101u8, 108u8, 108u8, 75u8, 110u8, 111u8, 119u8, 110u8, 84u8, 121u8, 112u8, 101u8, 115u8, 98u8, 6u8, 112u8, - 114u8, 111u8, 116u8, 111u8, 51u8, 10u8, 170u8, 11u8, 10u8, 19u8, 101u8, 114u8, 114u8, + 114u8, 111u8, 116u8, 111u8, 51u8, 10u8, 129u8, 15u8, 10u8, 19u8, 101u8, 114u8, 114u8, 111u8, 114u8, 95u8, 100u8, 101u8, 116u8, 97u8, 105u8, 108u8, 115u8, 46u8, 112u8, 114u8, 111u8, 116u8, 111u8, 18u8, 10u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 26u8, 30u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 47u8, 112u8, 114u8, 111u8, 116u8, 111u8, 98u8, 117u8, 102u8, 47u8, 100u8, 117u8, 114u8, 97u8, 116u8, 105u8, 111u8, 110u8, 46u8, 112u8, 114u8, 111u8, 116u8, 111u8, 34u8, - 71u8, 10u8, 9u8, 82u8, 101u8, 116u8, 114u8, 121u8, 73u8, 110u8, 102u8, 111u8, 18u8, - 58u8, 10u8, 11u8, 114u8, 101u8, 116u8, 114u8, 121u8, 95u8, 100u8, 101u8, 108u8, 97u8, - 121u8, 24u8, 1u8, 32u8, 1u8, 40u8, 11u8, 50u8, 25u8, 46u8, 103u8, 111u8, 111u8, - 103u8, 108u8, 101u8, 46u8, 112u8, 114u8, 111u8, 116u8, 111u8, 98u8, 117u8, 102u8, - 46u8, 68u8, 
117u8, 114u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 10u8, 114u8, 101u8, - 116u8, 114u8, 121u8, 68u8, 101u8, 108u8, 97u8, 121u8, 34u8, 72u8, 10u8, 9u8, 68u8, - 101u8, 98u8, 117u8, 103u8, 73u8, 110u8, 102u8, 111u8, 18u8, 35u8, 10u8, 13u8, 115u8, - 116u8, 97u8, 99u8, 107u8, 95u8, 101u8, 110u8, 116u8, 114u8, 105u8, 101u8, 115u8, - 24u8, 1u8, 32u8, 3u8, 40u8, 9u8, 82u8, 12u8, 115u8, 116u8, 97u8, 99u8, 107u8, 69u8, - 110u8, 116u8, 114u8, 105u8, 101u8, 115u8, 18u8, 22u8, 10u8, 6u8, 100u8, 101u8, 116u8, - 97u8, 105u8, 108u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 6u8, 100u8, 101u8, 116u8, - 97u8, 105u8, 108u8, 34u8, 155u8, 1u8, 10u8, 12u8, 81u8, 117u8, 111u8, 116u8, 97u8, - 70u8, 97u8, 105u8, 108u8, 117u8, 114u8, 101u8, 18u8, 66u8, 10u8, 10u8, 118u8, 105u8, - 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, - 11u8, 50u8, 34u8, 46u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, - 99u8, 46u8, 81u8, 117u8, 111u8, 116u8, 97u8, 70u8, 97u8, 105u8, 108u8, 117u8, 114u8, - 101u8, 46u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 10u8, - 118u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, 26u8, 71u8, - 10u8, 9u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 18u8, 24u8, - 10u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 24u8, 1u8, 32u8, 1u8, 40u8, - 9u8, 82u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 18u8, 32u8, 10u8, - 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, - 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, - 112u8, 116u8, 105u8, 111u8, 110u8, 34u8, 185u8, 1u8, 10u8, 9u8, 69u8, 114u8, 114u8, - 111u8, 114u8, 73u8, 110u8, 102u8, 111u8, 18u8, 22u8, 10u8, 6u8, 114u8, 101u8, 97u8, - 115u8, 111u8, 110u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 6u8, 114u8, 101u8, 97u8, - 115u8, 111u8, 110u8, 18u8, 22u8, 10u8, 6u8, 100u8, 111u8, 109u8, 97u8, 105u8, 110u8, - 24u8, 2u8, 32u8, 1u8, 40u8, 
9u8, 82u8, 6u8, 100u8, 111u8, 109u8, 97u8, 105u8, 110u8, - 18u8, 63u8, 10u8, 8u8, 109u8, 101u8, 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, 24u8, - 3u8, 32u8, 3u8, 40u8, 11u8, 50u8, 35u8, 46u8, 103u8, 111u8, 111u8, 103u8, 108u8, - 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 69u8, 114u8, 114u8, 111u8, 114u8, 73u8, 110u8, - 102u8, 111u8, 46u8, 77u8, 101u8, 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, 69u8, 110u8, - 116u8, 114u8, 121u8, 82u8, 8u8, 109u8, 101u8, 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, - 26u8, 59u8, 10u8, 13u8, 77u8, 101u8, 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, 69u8, - 110u8, 116u8, 114u8, 121u8, 18u8, 16u8, 10u8, 3u8, 107u8, 101u8, 121u8, 24u8, 1u8, - 32u8, 1u8, 40u8, 9u8, 82u8, 3u8, 107u8, 101u8, 121u8, 18u8, 20u8, 10u8, 5u8, 118u8, - 97u8, 108u8, 117u8, 101u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 5u8, 118u8, 97u8, - 108u8, 117u8, 101u8, 58u8, 2u8, 56u8, 1u8, 34u8, 189u8, 1u8, 10u8, 19u8, 80u8, 114u8, - 101u8, 99u8, 111u8, 110u8, 100u8, 105u8, 116u8, 105u8, 111u8, 110u8, 70u8, 97u8, - 105u8, 108u8, 117u8, 114u8, 101u8, 18u8, 73u8, 10u8, 10u8, 118u8, 105u8, 111u8, - 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, 11u8, - 50u8, 41u8, 46u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, - 46u8, 80u8, 114u8, 101u8, 99u8, 111u8, 110u8, 100u8, 105u8, 116u8, 105u8, 111u8, - 110u8, 70u8, 97u8, 105u8, 108u8, 117u8, 114u8, 101u8, 46u8, 86u8, 105u8, 111u8, - 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 10u8, 118u8, 105u8, 111u8, 108u8, - 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, 26u8, 91u8, 10u8, 9u8, 86u8, 105u8, 111u8, - 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 18u8, 18u8, 10u8, 4u8, 116u8, 121u8, 112u8, - 101u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 4u8, 116u8, 121u8, 112u8, 101u8, 18u8, - 24u8, 10u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 24u8, 2u8, 32u8, 1u8, - 40u8, 9u8, 82u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 18u8, 32u8, - 10u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 
105u8, 111u8, - 110u8, 24u8, 3u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, - 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 34u8, 168u8, 1u8, 10u8, 10u8, 66u8, 97u8, - 100u8, 82u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 18u8, 80u8, 10u8, 16u8, 102u8, - 105u8, 101u8, 108u8, 100u8, 95u8, 118u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, - 111u8, 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, 11u8, 50u8, 37u8, 46u8, 103u8, - 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 66u8, 97u8, 100u8, - 82u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 46u8, 70u8, 105u8, 101u8, 108u8, - 100u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 15u8, - 102u8, 105u8, 101u8, 108u8, 100u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, - 111u8, 110u8, 115u8, 26u8, 72u8, 10u8, 14u8, 70u8, 105u8, 101u8, 108u8, 100u8, 86u8, + 185u8, 1u8, 10u8, 9u8, 69u8, 114u8, 114u8, 111u8, 114u8, 73u8, 110u8, 102u8, 111u8, + 18u8, 22u8, 10u8, 6u8, 114u8, 101u8, 97u8, 115u8, 111u8, 110u8, 24u8, 1u8, 32u8, 1u8, + 40u8, 9u8, 82u8, 6u8, 114u8, 101u8, 97u8, 115u8, 111u8, 110u8, 18u8, 22u8, 10u8, 6u8, + 100u8, 111u8, 109u8, 97u8, 105u8, 110u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 6u8, + 100u8, 111u8, 109u8, 97u8, 105u8, 110u8, 18u8, 63u8, 10u8, 8u8, 109u8, 101u8, 116u8, + 97u8, 100u8, 97u8, 116u8, 97u8, 24u8, 3u8, 32u8, 3u8, 40u8, 11u8, 50u8, 35u8, 46u8, + 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 69u8, + 114u8, 114u8, 111u8, 114u8, 73u8, 110u8, 102u8, 111u8, 46u8, 77u8, 101u8, 116u8, + 97u8, 100u8, 97u8, 116u8, 97u8, 69u8, 110u8, 116u8, 114u8, 121u8, 82u8, 8u8, 109u8, + 101u8, 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, 26u8, 59u8, 10u8, 13u8, 77u8, 101u8, + 116u8, 97u8, 100u8, 97u8, 116u8, 97u8, 69u8, 110u8, 116u8, 114u8, 121u8, 18u8, 16u8, + 10u8, 3u8, 107u8, 101u8, 121u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 3u8, 107u8, + 101u8, 121u8, 18u8, 20u8, 10u8, 5u8, 118u8, 97u8, 108u8, 117u8, 101u8, 24u8, 2u8, + 32u8, 1u8, 40u8, 
9u8, 82u8, 5u8, 118u8, 97u8, 108u8, 117u8, 101u8, 58u8, 2u8, 56u8, + 1u8, 34u8, 71u8, 10u8, 9u8, 82u8, 101u8, 116u8, 114u8, 121u8, 73u8, 110u8, 102u8, + 111u8, 18u8, 58u8, 10u8, 11u8, 114u8, 101u8, 116u8, 114u8, 121u8, 95u8, 100u8, 101u8, + 108u8, 97u8, 121u8, 24u8, 1u8, 32u8, 1u8, 40u8, 11u8, 50u8, 25u8, 46u8, 103u8, 111u8, + 111u8, 103u8, 108u8, 101u8, 46u8, 112u8, 114u8, 111u8, 116u8, 111u8, 98u8, 117u8, + 102u8, 46u8, 68u8, 117u8, 114u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 10u8, 114u8, + 101u8, 116u8, 114u8, 121u8, 68u8, 101u8, 108u8, 97u8, 121u8, 34u8, 72u8, 10u8, 9u8, + 68u8, 101u8, 98u8, 117u8, 103u8, 73u8, 110u8, 102u8, 111u8, 18u8, 35u8, 10u8, 13u8, + 115u8, 116u8, 97u8, 99u8, 107u8, 95u8, 101u8, 110u8, 116u8, 114u8, 105u8, 101u8, + 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, 9u8, 82u8, 12u8, 115u8, 116u8, 97u8, 99u8, 107u8, + 69u8, 110u8, 116u8, 114u8, 105u8, 101u8, 115u8, 18u8, 22u8, 10u8, 6u8, 100u8, 101u8, + 116u8, 97u8, 105u8, 108u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 6u8, 100u8, 101u8, + 116u8, 97u8, 105u8, 108u8, 34u8, 142u8, 4u8, 10u8, 12u8, 81u8, 117u8, 111u8, 116u8, + 97u8, 70u8, 97u8, 105u8, 108u8, 117u8, 114u8, 101u8, 18u8, 66u8, 10u8, 10u8, 118u8, + 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, + 40u8, 11u8, 50u8, 34u8, 46u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, + 112u8, 99u8, 46u8, 81u8, 117u8, 111u8, 116u8, 97u8, 70u8, 97u8, 105u8, 108u8, 117u8, + 114u8, 101u8, 46u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, + 82u8, 10u8, 118u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 115u8, + 26u8, 185u8, 3u8, 10u8, 9u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, + 110u8, 18u8, 24u8, 10u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 24u8, + 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, + 18u8, 32u8, 10u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, + 111u8, 110u8, 24u8, 2u8, 32u8, 1u8, 
40u8, 9u8, 82u8, 11u8, 100u8, 101u8, 115u8, 99u8, + 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 18u8, 31u8, 10u8, 11u8, 97u8, 112u8, + 105u8, 95u8, 115u8, 101u8, 114u8, 118u8, 105u8, 99u8, 101u8, 24u8, 3u8, 32u8, 1u8, + 40u8, 9u8, 82u8, 10u8, 97u8, 112u8, 105u8, 83u8, 101u8, 114u8, 118u8, 105u8, 99u8, + 101u8, 18u8, 33u8, 10u8, 12u8, 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 109u8, 101u8, + 116u8, 114u8, 105u8, 99u8, 24u8, 4u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, 113u8, 117u8, + 111u8, 116u8, 97u8, 77u8, 101u8, 116u8, 114u8, 105u8, 99u8, 18u8, 25u8, 10u8, 8u8, + 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 105u8, 100u8, 24u8, 5u8, 32u8, 1u8, 40u8, + 9u8, 82u8, 7u8, 113u8, 117u8, 111u8, 116u8, 97u8, 73u8, 100u8, 18u8, 98u8, 10u8, + 16u8, 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 100u8, 105u8, 109u8, 101u8, 110u8, + 115u8, 105u8, 111u8, 110u8, 115u8, 24u8, 6u8, 32u8, 3u8, 40u8, 11u8, 50u8, 55u8, + 46u8, 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 81u8, + 117u8, 111u8, 116u8, 97u8, 70u8, 97u8, 105u8, 108u8, 117u8, 114u8, 101u8, 46u8, 86u8, + 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 46u8, 81u8, 117u8, 111u8, + 116u8, 97u8, 68u8, 105u8, 109u8, 101u8, 110u8, 115u8, 105u8, 111u8, 110u8, 115u8, + 69u8, 110u8, 116u8, 114u8, 121u8, 82u8, 15u8, 113u8, 117u8, 111u8, 116u8, 97u8, 68u8, + 105u8, 109u8, 101u8, 110u8, 115u8, 105u8, 111u8, 110u8, 115u8, 18u8, 31u8, 10u8, + 11u8, 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 118u8, 97u8, 108u8, 117u8, 101u8, 24u8, + 7u8, 32u8, 1u8, 40u8, 3u8, 82u8, 10u8, 113u8, 117u8, 111u8, 116u8, 97u8, 86u8, 97u8, + 108u8, 117u8, 101u8, 18u8, 49u8, 10u8, 18u8, 102u8, 117u8, 116u8, 117u8, 114u8, + 101u8, 95u8, 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 118u8, 97u8, 108u8, 117u8, + 101u8, 24u8, 8u8, 32u8, 1u8, 40u8, 3u8, 72u8, 0u8, 82u8, 16u8, 102u8, 117u8, 116u8, + 117u8, 114u8, 101u8, 81u8, 117u8, 111u8, 116u8, 97u8, 86u8, 97u8, 108u8, 117u8, + 101u8, 136u8, 1u8, 1u8, 26u8, 66u8, 10u8, 20u8, 81u8, 117u8, 111u8, 116u8, 
97u8, + 68u8, 105u8, 109u8, 101u8, 110u8, 115u8, 105u8, 111u8, 110u8, 115u8, 69u8, 110u8, + 116u8, 114u8, 121u8, 18u8, 16u8, 10u8, 3u8, 107u8, 101u8, 121u8, 24u8, 1u8, 32u8, + 1u8, 40u8, 9u8, 82u8, 3u8, 107u8, 101u8, 121u8, 18u8, 20u8, 10u8, 5u8, 118u8, 97u8, + 108u8, 117u8, 101u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 5u8, 118u8, 97u8, 108u8, + 117u8, 101u8, 58u8, 2u8, 56u8, 1u8, 66u8, 21u8, 10u8, 19u8, 95u8, 102u8, 117u8, + 116u8, 117u8, 114u8, 101u8, 95u8, 113u8, 117u8, 111u8, 116u8, 97u8, 95u8, 118u8, + 97u8, 108u8, 117u8, 101u8, 34u8, 189u8, 1u8, 10u8, 19u8, 80u8, 114u8, 101u8, 99u8, + 111u8, 110u8, 100u8, 105u8, 116u8, 105u8, 111u8, 110u8, 70u8, 97u8, 105u8, 108u8, + 117u8, 114u8, 101u8, 18u8, 73u8, 10u8, 10u8, 118u8, 105u8, 111u8, 108u8, 97u8, 116u8, + 105u8, 111u8, 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, 11u8, 50u8, 41u8, 46u8, + 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 80u8, + 114u8, 101u8, 99u8, 111u8, 110u8, 100u8, 105u8, 116u8, 105u8, 111u8, 110u8, 70u8, + 97u8, 105u8, 108u8, 117u8, 114u8, 101u8, 46u8, 86u8, 105u8, 111u8, 108u8, 97u8, + 116u8, 105u8, 111u8, 110u8, 82u8, 10u8, 118u8, 105u8, 111u8, 108u8, 97u8, 116u8, + 105u8, 111u8, 110u8, 115u8, 26u8, 91u8, 10u8, 9u8, 86u8, 105u8, 111u8, 108u8, 97u8, + 116u8, 105u8, 111u8, 110u8, 18u8, 18u8, 10u8, 4u8, 116u8, 121u8, 112u8, 101u8, 24u8, + 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 4u8, 116u8, 121u8, 112u8, 101u8, 18u8, 24u8, 10u8, + 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, + 82u8, 7u8, 115u8, 117u8, 98u8, 106u8, 101u8, 99u8, 116u8, 18u8, 32u8, 10u8, 11u8, + 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 24u8, + 3u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, + 112u8, 116u8, 105u8, 111u8, 110u8, 34u8, 140u8, 2u8, 10u8, 10u8, 66u8, 97u8, 100u8, + 82u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 18u8, 80u8, 10u8, 16u8, 102u8, 105u8, + 101u8, 108u8, 100u8, 95u8, 118u8, 105u8, 
111u8, 108u8, 97u8, 116u8, 105u8, 111u8, + 110u8, 115u8, 24u8, 1u8, 32u8, 3u8, 40u8, 11u8, 50u8, 37u8, 46u8, 103u8, 111u8, + 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 66u8, 97u8, 100u8, 82u8, + 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 46u8, 70u8, 105u8, 101u8, 108u8, 100u8, + 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 82u8, 15u8, 102u8, + 105u8, 101u8, 108u8, 100u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, + 110u8, 115u8, 26u8, 171u8, 1u8, 10u8, 14u8, 70u8, 105u8, 101u8, 108u8, 100u8, 86u8, 105u8, 111u8, 108u8, 97u8, 116u8, 105u8, 111u8, 110u8, 18u8, 20u8, 10u8, 5u8, 102u8, 105u8, 101u8, 108u8, 100u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 5u8, 102u8, 105u8, 101u8, 108u8, 100u8, 18u8, 32u8, 10u8, 11u8, 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, - 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 34u8, - 79u8, 10u8, 11u8, 82u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 73u8, 110u8, 102u8, - 111u8, 18u8, 29u8, 10u8, 10u8, 114u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 95u8, - 105u8, 100u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 9u8, 114u8, 101u8, 113u8, 117u8, - 101u8, 115u8, 116u8, 73u8, 100u8, 18u8, 33u8, 10u8, 12u8, 115u8, 101u8, 114u8, 118u8, - 105u8, 110u8, 103u8, 95u8, 100u8, 97u8, 116u8, 97u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, - 82u8, 11u8, 115u8, 101u8, 114u8, 118u8, 105u8, 110u8, 103u8, 68u8, 97u8, 116u8, 97u8, - 34u8, 144u8, 1u8, 10u8, 12u8, 82u8, 101u8, 115u8, 111u8, 117u8, 114u8, 99u8, 101u8, - 73u8, 110u8, 102u8, 111u8, 18u8, 35u8, 10u8, 13u8, 114u8, 101u8, 115u8, 111u8, 117u8, + 100u8, 101u8, 115u8, 99u8, 114u8, 105u8, 112u8, 116u8, 105u8, 111u8, 110u8, 18u8, + 22u8, 10u8, 6u8, 114u8, 101u8, 97u8, 115u8, 111u8, 110u8, 24u8, 3u8, 32u8, 1u8, 40u8, + 9u8, 82u8, 6u8, 114u8, 101u8, 97u8, 115u8, 111u8, 110u8, 18u8, 73u8, 10u8, 17u8, + 108u8, 111u8, 99u8, 97u8, 108u8, 105u8, 122u8, 101u8, 100u8, 95u8, 109u8, 
101u8, + 115u8, 115u8, 97u8, 103u8, 101u8, 24u8, 4u8, 32u8, 1u8, 40u8, 11u8, 50u8, 28u8, 46u8, + 103u8, 111u8, 111u8, 103u8, 108u8, 101u8, 46u8, 114u8, 112u8, 99u8, 46u8, 76u8, + 111u8, 99u8, 97u8, 108u8, 105u8, 122u8, 101u8, 100u8, 77u8, 101u8, 115u8, 115u8, + 97u8, 103u8, 101u8, 82u8, 16u8, 108u8, 111u8, 99u8, 97u8, 108u8, 105u8, 122u8, 101u8, + 100u8, 77u8, 101u8, 115u8, 115u8, 97u8, 103u8, 101u8, 34u8, 79u8, 10u8, 11u8, 82u8, + 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 73u8, 110u8, 102u8, 111u8, 18u8, 29u8, + 10u8, 10u8, 114u8, 101u8, 113u8, 117u8, 101u8, 115u8, 116u8, 95u8, 105u8, 100u8, + 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 9u8, 114u8, 101u8, 113u8, 117u8, 101u8, 115u8, + 116u8, 73u8, 100u8, 18u8, 33u8, 10u8, 12u8, 115u8, 101u8, 114u8, 118u8, 105u8, 110u8, + 103u8, 95u8, 100u8, 97u8, 116u8, 97u8, 24u8, 2u8, 32u8, 1u8, 40u8, 9u8, 82u8, 11u8, + 115u8, 101u8, 114u8, 118u8, 105u8, 110u8, 103u8, 68u8, 97u8, 116u8, 97u8, 34u8, + 144u8, 1u8, 10u8, 12u8, 82u8, 101u8, 115u8, 111u8, 117u8, 114u8, 99u8, 101u8, 73u8, + 110u8, 102u8, 111u8, 18u8, 35u8, 10u8, 13u8, 114u8, 101u8, 115u8, 111u8, 117u8, 114u8, 99u8, 101u8, 95u8, 116u8, 121u8, 112u8, 101u8, 24u8, 1u8, 32u8, 1u8, 40u8, 9u8, 82u8, 12u8, 114u8, 101u8, 115u8, 111u8, 117u8, 114u8, 99u8, 101u8, 84u8, 121u8, 112u8, 101u8, 18u8, 35u8, 10u8, 13u8, 114u8, 101u8, 115u8, 111u8, 117u8, 114u8, 99u8, diff --git a/tonic-types/src/richer_error/mod.rs b/tonic-types/src/richer_error/mod.rs index 927bbc97d..54cd9447c 100644 --- a/tonic-types/src/richer_error/mod.rs +++ b/tonic-types/src/richer_error/mod.rs @@ -1002,7 +1002,7 @@ mod tests { .add_help_link("link to resource", "resource.example.local") .set_localized_message("en-US", "message for the user"); - let fmt_details = format!("{:?}", err_details); + let fmt_details = format!("{err_details:?}"); let err_details_vec = vec![ RetryInfo::new(Some(Duration::from_secs(5))).into(), @@ -1021,7 +1021,7 @@ mod tests { LocalizedMessage::new("en-US", "message for the user").into(), 
]; - let fmt_details_vec = format!("{:?}", err_details_vec); + let fmt_details_vec = format!("{err_details_vec:?}"); let status_from_struct = Status::with_error_details( Code::InvalidArgument, @@ -1037,13 +1037,10 @@ mod tests { let ext_details = match status_from_vec.check_error_details() { Ok(ext_details) => ext_details, - Err(err) => panic!( - "Error extracting details struct from status_from_vec: {:?}", - err - ), + Err(err) => panic!("Error extracting details struct from status_from_vec: {err:?}"), }; - let fmt_ext_details = format!("{:?}", ext_details); + let fmt_ext_details = format!("{ext_details:?}"); assert!( fmt_ext_details.eq(&fmt_details), @@ -1052,13 +1049,10 @@ mod tests { let ext_details_vec = match status_from_struct.check_error_details_vec() { Ok(ext_details) => ext_details, - Err(err) => panic!( - "Error extracting details_vec from status_from_struct: {:?}", - err - ), + Err(err) => panic!("Error extracting details_vec from status_from_struct: {err:?}"), }; - let fmt_ext_details_vec = format!("{:?}", ext_details_vec); + let fmt_ext_details_vec = format!("{ext_details_vec:?}"); assert!( fmt_ext_details_vec.eq(&fmt_details_vec), diff --git a/tonic-types/src/richer_error/std_messages/bad_request.rs b/tonic-types/src/richer_error/std_messages/bad_request.rs index c9fcb984a..078d09cdf 100644 --- a/tonic-types/src/richer_error/std_messages/bad_request.rs +++ b/tonic-types/src/richer_error/std_messages/bad_request.rs @@ -1,13 +1,13 @@ use prost::{DecodeError, Message}; use prost_types::Any; -use crate::richer_error::FromAnyRef; +use crate::{richer_error::FromAnyRef, LocalizedMessage}; use super::super::{pb, FromAny, IntoAny}; /// Used at the `field_violations` field of the [`BadRequest`] struct. /// Describes a single bad request field. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct FieldViolation { /// Path leading to a field in the request body. 
Value should be a /// sequence of dot-separated identifiers that identify a protocol buffer @@ -16,6 +16,14 @@ pub struct FieldViolation { /// Description of why the field is bad. pub description: String, + + /// The reason of the field-level error. Value should be a + /// SCREAMING_SNAKE_CASE error identifier from the domain of the API + /// service. + pub reason: String, + + /// A localized version of the field-level error. + pub localized_message: Option, } impl FieldViolation { @@ -24,6 +32,7 @@ impl FieldViolation { FieldViolation { field: field.into(), description: description.into(), + ..Default::default() } } } @@ -33,6 +42,8 @@ impl From for FieldViolation { FieldViolation { field: value.field, description: value.description, + reason: value.reason, + localized_message: value.localized_message.map(Into::into), } } } @@ -42,6 +53,7 @@ impl From for pb::bad_request::FieldViolation { pb::bad_request::FieldViolation { field: value.field, description: value.description, + ..Default::default() } } } @@ -75,6 +87,7 @@ impl BadRequest { field_violations: vec![FieldViolation { field: field.into(), description: description.into(), + ..Default::default() }], } } @@ -88,6 +101,7 @@ impl BadRequest { self.field_violations.append(&mut vec![FieldViolation { field: field.into(), description: description.into(), + ..Default::default() }]); self } @@ -150,7 +164,7 @@ mod tests { #[test] fn gen_bad_request() { let mut br_details = BadRequest::new(Vec::new()); - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); let expected = "BadRequest { field_violations: [] }"; @@ -168,9 +182,9 @@ mod tests { .add_violation("field_a", "description_a") .add_violation("field_b", "description_b"); - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); - let expected_filled = "BadRequest { field_violations: [FieldViolation { field: \"field_a\", description: \"description_a\" }, FieldViolation { field: \"field_b\", 
description: \"description_b\" }] }"; + let expected_filled = "BadRequest { field_violations: [FieldViolation { field: \"field_a\", description: \"description_a\", reason: \"\", localized_message: None }, FieldViolation { field: \"field_b\", description: \"description_b\", reason: \"\", localized_message: None }] }"; assert!( formatted.eq(expected_filled), @@ -183,7 +197,7 @@ mod tests { ); let gen_any = br_details.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.BadRequest\", value: [10, 24, 10, 7, 102, 105, 101, 108, 100, 95, 97, 18, 13, 100, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110, 95, 97, 10, 24, 10, 7, 102, 105, 101, 108, 100, 95, 98, 18, 13, 100, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110, 95, 98] }"; @@ -193,11 +207,11 @@ mod tests { ); let br_details = match BadRequest::from_any(gen_any) { - Err(error) => panic!("Error generating BadRequest from Any: {:?}", error), + Err(error) => panic!("Error generating BadRequest from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/debug_info.rs b/tonic-types/src/richer_error/std_messages/debug_info.rs index 9c450c774..c71954099 100644 --- a/tonic-types/src/richer_error/std_messages/debug_info.rs +++ b/tonic-types/src/richer_error/std_messages/debug_info.rs @@ -94,7 +94,7 @@ mod tests { "details about the error", ); - let formatted = format!("{:?}", debug_info); + let formatted = format!("{debug_info:?}"); let expected_filled = "DebugInfo { stack_entries: [\"trace 3\", \"trace 2\", \"trace 1\"], detail: \"details about the error\" }"; @@ -104,7 +104,7 @@ mod tests { ); let gen_any = debug_info.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { 
type_url: \"type.googleapis.com/google.rpc.DebugInfo\", value: [10, 7, 116, 114, 97, 99, 101, 32, 51, 10, 7, 116, 114, 97, 99, 101, 32, 50, 10, 7, 116, 114, 97, 99, 101, 32, 49, 18, 23, 100, 101, 116, 97, 105, 108, 115, 32, 97, 98, 111, 117, 116, 32, 116, 104, 101, 32, 101, 114, 114, 111, 114] }"; @@ -115,11 +115,11 @@ mod tests { ); let br_details = match DebugInfo::from_any(gen_any) { - Err(error) => panic!("Error generating DebugInfo from Any: {:?}", error), + Err(error) => panic!("Error generating DebugInfo from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/error_info.rs b/tonic-types/src/richer_error/std_messages/error_info.rs index 8d535a157..8b1f36a7e 100644 --- a/tonic-types/src/richer_error/std_messages/error_info.rs +++ b/tonic-types/src/richer_error/std_messages/error_info.rs @@ -114,7 +114,7 @@ mod tests { let error_info = ErrorInfo::new("SOME_INFO", "mydomain.com", metadata); - let formatted = format!("{:?}", error_info); + let formatted = format!("{error_info:?}"); let expected_filled = "ErrorInfo { reason: \"SOME_INFO\", domain: \"mydomain.com\", metadata: {\"instanceLimitPerRequest\": \"100\"} }"; @@ -125,7 +125,7 @@ mod tests { let gen_any = error_info.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.ErrorInfo\", value: [10, 9, 83, 79, 77, 69, 95, 73, 78, 70, 79, 18, 12, 109, 121, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 26, 30, 10, 23, 105, 110, 115, 116, 97, 110, 99, 101, 76, 105, 109, 105, 116, 80, 101, 114, 82, 101, 113, 117, 101, 115, 116, 18, 3, 49, 48, 48] }"; @@ -136,11 +136,11 @@ mod tests { ); let br_details = match ErrorInfo::from_any(gen_any) { - Err(error) => panic!("Error generating ErrorInfo from Any: {:?}", error), + Err(error) => 
panic!("Error generating ErrorInfo from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/help.rs b/tonic-types/src/richer_error/std_messages/help.rs index 08549b2e9..67e44a3f8 100644 --- a/tonic-types/src/richer_error/std_messages/help.rs +++ b/tonic-types/src/richer_error/std_messages/help.rs @@ -146,7 +146,7 @@ mod tests { #[test] fn gen_help() { let mut help = Help::new(Vec::new()); - let formatted = format!("{:?}", help); + let formatted = format!("{help:?}"); let expected = "Help { links: [] }"; @@ -163,7 +163,7 @@ mod tests { help.add_link("link to resource a", "resource-a.example.local") .add_link("link to resource b", "resource-b.example.local"); - let formatted = format!("{:?}", help); + let formatted = format!("{help:?}"); let expected_filled = "Help { links: [HelpLink { description: \"link to resource a\", url: \"resource-a.example.local\" }, HelpLink { description: \"link to resource b\", url: \"resource-b.example.local\" }] }"; @@ -179,7 +179,7 @@ mod tests { let gen_any = help.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.Help\", value: [10, 46, 10, 18, 108, 105, 110, 107, 32, 116, 111, 32, 114, 101, 115, 111, 117, 114, 99, 101, 32, 97, 18, 24, 114, 101, 115, 111, 117, 114, 99, 101, 45, 97, 46, 101, 120, 97, 109, 112, 108, 101, 46, 108, 111, 99, 97, 108, 10, 46, 10, 18, 108, 105, 110, 107, 32, 116, 111, 32, 114, 101, 115, 111, 117, 114, 99, 101, 32, 98, 18, 24, 114, 101, 115, 111, 117, 114, 99, 101, 45, 98, 46, 101, 120, 97, 109, 112, 108, 101, 46, 108, 111, 99, 97, 108] }"; @@ -189,11 +189,11 @@ mod tests { ); let br_details = match Help::from_any(gen_any) { - Err(error) => panic!("Error generating Help from Any: {:?}", error), + Err(error) => 
panic!("Error generating Help from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/loc_message.rs b/tonic-types/src/richer_error/std_messages/loc_message.rs index 98ab72bce..df7430273 100644 --- a/tonic-types/src/richer_error/std_messages/loc_message.rs +++ b/tonic-types/src/richer_error/std_messages/loc_message.rs @@ -10,7 +10,7 @@ use super::super::{pb, FromAny, IntoAny}; /// that is safe to return to the user. /// /// [error_details.proto]: https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct LocalizedMessage { /// Locale used, following the specification defined in [BCP 47]. For /// example: "en-US", "fr-CH" or "es-MX". @@ -95,7 +95,7 @@ mod tests { fn gen_localized_message() { let loc_message = LocalizedMessage::new("en-US", "message for the user"); - let formatted = format!("{:?}", loc_message); + let formatted = format!("{loc_message:?}"); let expected_filled = "LocalizedMessage { locale: \"en-US\", message: \"message for the user\" }"; @@ -107,7 +107,7 @@ mod tests { let gen_any = loc_message.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.LocalizedMessage\", value: [10, 5, 101, 110, 45, 85, 83, 18, 20, 109, 101, 115, 115, 97, 103, 101, 32, 102, 111, 114, 32, 116, 104, 101, 32, 117, 115, 101, 114] }"; @@ -118,11 +118,11 @@ mod tests { ); let br_details = match LocalizedMessage::from_any(gen_any) { - Err(error) => panic!("Error generating LocalizedMessage from Any: {:?}", error), + Err(error) => panic!("Error generating LocalizedMessage from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let 
formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/prec_failure.rs b/tonic-types/src/richer_error/std_messages/prec_failure.rs index a8a5c50f1..9e4f8884b 100644 --- a/tonic-types/src/richer_error/std_messages/prec_failure.rs +++ b/tonic-types/src/richer_error/std_messages/prec_failure.rs @@ -170,7 +170,7 @@ mod tests { #[test] fn gen_prec_failure() { let mut prec_failure = PreconditionFailure::new(Vec::new()); - let formatted = format!("{:?}", prec_failure); + let formatted = format!("{prec_failure:?}"); let expected = "PreconditionFailure { violations: [] }"; @@ -188,7 +188,7 @@ mod tests { .add_violation("TOS", "example.local", "Terms of service not accepted") .add_violation("FNF", "example.local", "File not found"); - let formatted = format!("{:?}", prec_failure); + let formatted = format!("{prec_failure:?}"); let expected_filled = "PreconditionFailure { violations: [PreconditionViolation { type: \"TOS\", subject: \"example.local\", description: \"Terms of service not accepted\" }, PreconditionViolation { type: \"FNF\", subject: \"example.local\", description: \"File not found\" }] }"; @@ -204,7 +204,7 @@ mod tests { let gen_any = prec_failure.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.PreconditionFailure\", value: [10, 51, 10, 3, 84, 79, 83, 18, 13, 101, 120, 97, 109, 112, 108, 101, 46, 108, 111, 99, 97, 108, 26, 29, 84, 101, 114, 109, 115, 32, 111, 102, 32, 115, 101, 114, 118, 105, 99, 101, 32, 110, 111, 116, 32, 97, 99, 99, 101, 112, 116, 101, 100, 10, 36, 10, 3, 70, 78, 70, 18, 13, 101, 120, 97, 109, 112, 108, 101, 46, 108, 111, 99, 97, 108, 26, 14, 70, 105, 108, 101, 32, 110, 111, 116, 32, 102, 111, 117, 110, 100] }"; @@ -214,11 +214,11 @@ mod tests { ); let br_details = match PreconditionFailure::from_any(gen_any) { - Err(error) => panic!("Error generating 
PreconditionFailure from Any: {:?}", error), + Err(error) => panic!("Error generating PreconditionFailure from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/quota_failure.rs b/tonic-types/src/richer_error/std_messages/quota_failure.rs index 23d160521..6b68ba934 100644 --- a/tonic-types/src/richer_error/std_messages/quota_failure.rs +++ b/tonic-types/src/richer_error/std_messages/quota_failure.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use prost::{DecodeError, Message}; use prost_types::Any; @@ -7,13 +9,32 @@ use super::super::{pb, FromAny, IntoAny}; /// Used at the `violations` field of the [`QuotaFailure`] struct. Describes a /// single quota violation. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct QuotaViolation { /// Subject on which the quota check failed. pub subject: String, /// Description of why the quota check failed. pub description: String, + + /// The API service from which the quota check originates. + pub api_service: String, + + /// The quota check that was violated. + pub quota_metric: String, + + /// The ID of the violated quota check. + pub quota_id: String, + + /// The dimensions of the violated quota check. + pub quota_dimensions: HashMap, + + /// The quota check value at the time of violation. + pub quota_value: i64, + + /// The future value of the quota check value when a quota check rollout is + /// in progress. 
+ pub futura_quota_value: Option, } impl QuotaViolation { @@ -22,6 +43,7 @@ impl QuotaViolation { QuotaViolation { subject: subject.into(), description: description.into(), + ..Default::default() } } } @@ -31,6 +53,12 @@ impl From for QuotaViolation { QuotaViolation { subject: value.subject, description: value.description, + api_service: value.api_service, + quota_metric: value.quota_metric, + quota_id: value.quota_id, + quota_dimensions: value.quota_dimensions, + quota_value: value.quota_value, + futura_quota_value: value.future_quota_value, } } } @@ -40,6 +68,12 @@ impl From for pb::quota_failure::Violation { pb::quota_failure::Violation { subject: value.subject, description: value.description, + api_service: value.api_service, + quota_metric: value.quota_metric, + quota_id: value.quota_id, + quota_dimensions: value.quota_dimensions, + quota_value: value.quota_value, + future_quota_value: value.futura_quota_value, } } } @@ -72,6 +106,7 @@ impl QuotaFailure { violations: vec![QuotaViolation { subject: subject.into(), description: description.into(), + ..Default::default() }], } } @@ -85,6 +120,7 @@ impl QuotaFailure { self.violations.append(&mut vec![QuotaViolation { subject: subject.into(), description: description.into(), + ..Default::default() }]); self } @@ -147,7 +183,7 @@ mod tests { #[test] fn gen_quota_failure() { let mut quota_failure = QuotaFailure::new(Vec::new()); - let formatted = format!("{:?}", quota_failure); + let formatted = format!("{quota_failure:?}"); let expected = "QuotaFailure { violations: [] }"; @@ -165,9 +201,9 @@ mod tests { .add_violation("clientip:", "description a") .add_violation("project:", "description b"); - let formatted = format!("{:?}", quota_failure); + let formatted = format!("{quota_failure:?}"); - let expected_filled = "QuotaFailure { violations: [QuotaViolation { subject: \"clientip:\", description: \"description a\" }, QuotaViolation { subject: \"project:\", description: \"description b\" }] }"; + let expected_filled = 
"QuotaFailure { violations: [QuotaViolation { subject: \"clientip:\", description: \"description a\", api_service: \"\", quota_metric: \"\", quota_id: \"\", quota_dimensions: {}, quota_value: 0, futura_quota_value: None }, QuotaViolation { subject: \"project:\", description: \"description b\", api_service: \"\", quota_metric: \"\", quota_id: \"\", quota_dimensions: {}, quota_value: 0, futura_quota_value: None }] }"; assert!( formatted.eq(expected_filled), @@ -181,7 +217,7 @@ mod tests { let gen_any = quota_failure.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.QuotaFailure\", value: [10, 38, 10, 21, 99, 108, 105, 101, 110, 116, 105, 112, 58, 60, 105, 112, 32, 97, 100, 100, 114, 101, 115, 115, 62, 18, 13, 100, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110, 32, 97, 10, 37, 10, 20, 112, 114, 111, 106, 101, 99, 116, 58, 60, 112, 114, 111, 106, 101, 99, 116, 32, 105, 100, 62, 18, 13, 100, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110, 32, 98] }"; @@ -191,11 +227,11 @@ mod tests { ); let br_details = match QuotaFailure::from_any(gen_any) { - Err(error) => panic!("Error generating QuotaFailure from Any: {:?}", error), + Err(error) => panic!("Error generating QuotaFailure from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/request_info.rs b/tonic-types/src/richer_error/std_messages/request_info.rs index bb09f0c5b..66a84633a 100644 --- a/tonic-types/src/richer_error/std_messages/request_info.rs +++ b/tonic-types/src/richer_error/std_messages/request_info.rs @@ -94,7 +94,7 @@ mod tests { fn gen_request_info() { let req_info = RequestInfo::new("some-id", "some-data"); - let formatted = format!("{:?}", req_info); + let formatted = format!("{req_info:?}"); let expected_filled 
= "RequestInfo { request_id: \"some-id\", serving_data: \"some-data\" }"; @@ -106,7 +106,7 @@ mod tests { let gen_any = req_info.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.RequestInfo\", value: [10, 7, 115, 111, 109, 101, 45, 105, 100, 18, 9, 115, 111, 109, 101, 45, 100, 97, 116, 97] }"; @@ -117,11 +117,11 @@ mod tests { ); let br_details = match RequestInfo::from_any(gen_any) { - Err(error) => panic!("Error generating RequestInfo from Any: {:?}", error), + Err(error) => panic!("Error generating RequestInfo from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/resource_info.rs b/tonic-types/src/richer_error/std_messages/resource_info.rs index ccb23e1a7..15b796442 100644 --- a/tonic-types/src/richer_error/std_messages/resource_info.rs +++ b/tonic-types/src/richer_error/std_messages/resource_info.rs @@ -111,7 +111,7 @@ mod tests { fn gen_resource_info() { let res_info = ResourceInfo::new("resource-type", "resource-name", "owner", "description"); - let formatted = format!("{:?}", res_info); + let formatted = format!("{res_info:?}"); let expected_filled = "ResourceInfo { resource_type: \"resource-type\", resource_name: \"resource-name\", owner: \"owner\", description: \"description\" }"; @@ -122,7 +122,7 @@ mod tests { let gen_any = res_info.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.ResourceInfo\", value: [10, 13, 114, 101, 115, 111, 117, 114, 99, 101, 45, 116, 121, 112, 101, 18, 13, 114, 101, 115, 111, 117, 114, 99, 101, 45, 110, 97, 109, 101, 26, 5, 111, 119, 110, 101, 114, 34, 11, 100, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110] }"; @@ -133,11 
+133,11 @@ mod tests { ); let br_details = match ResourceInfo::from_any(gen_any) { - Err(error) => panic!("Error generating ResourceInfo from Any: {:?}", error), + Err(error) => panic!("Error generating ResourceInfo from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-types/src/richer_error/std_messages/retry_info.rs b/tonic-types/src/richer_error/std_messages/retry_info.rs index 93697afb1..ccb3a2b0f 100644 --- a/tonic-types/src/richer_error/std_messages/retry_info.rs +++ b/tonic-types/src/richer_error/std_messages/retry_info.rs @@ -126,7 +126,7 @@ mod tests { fn gen_retry_info() { let retry_info = RetryInfo::new(Some(Duration::from_secs(u64::MAX))); - let formatted = format!("{:?}", retry_info); + let formatted = format!("{retry_info:?}"); let expected_filled = "RetryInfo { retry_delay: Some(315576000000.999999999s) }"; @@ -142,7 +142,7 @@ mod tests { let gen_any = retry_info.into_any(); - let formatted = format!("{:?}", gen_any); + let formatted = format!("{gen_any:?}"); let expected = "Any { type_url: \"type.googleapis.com/google.rpc.RetryInfo\", value: [10, 13, 8, 128, 188, 174, 206, 151, 9, 16, 255, 147, 235, 220, 3] }"; @@ -153,11 +153,11 @@ mod tests { ); let br_details = match RetryInfo::from_any(gen_any) { - Err(error) => panic!("Error generating RetryInfo from Any: {:?}", error), + Err(error) => panic!("Error generating RetryInfo from Any: {error:?}"), Ok(from_any) => from_any, }; - let formatted = format!("{:?}", br_details); + let formatted = format!("{br_details:?}"); assert!( formatted.eq(expected_filled), diff --git a/tonic-web/Cargo.toml b/tonic-web/Cargo.toml index 487efe709..0144dd81b 100644 --- a/tonic-web/Cargo.toml +++ b/tonic-web/Cargo.toml @@ -11,7 +11,7 @@ license = "MIT" name = "tonic-web" readme = "README.md" repository = "https://github.com/hyperium/tonic" -version = 
"0.13.0" +version = "0.14.0" rust-version = { workspace = true } [dependencies] @@ -21,7 +21,7 @@ tokio-stream = { version = "0.1", default-features = false } http = "1" http-body = "1" pin-project = "1" -tonic = { version = "0.13.0", path = "../tonic", default-features = false } +tonic = { version = "0.14.0", path = "../tonic", default-features = false } tower-service = "0.3" tower-layer = "0.3" tracing = "0.1" diff --git a/tonic-web/LICENSE b/tonic-web/LICENSE deleted file mode 100644 index 307709840..000000000 --- a/tonic-web/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/tonic-web/LICENSE b/tonic-web/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic-web/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic-web/src/call.rs b/tonic-web/src/call.rs index 90cfafca9..a5e0ad859 100644 --- a/tonic-web/src/call.rs +++ b/tonic-web/src/call.rs @@ -373,7 +373,7 @@ impl Encoding { } fn internal_error(e: impl std::fmt::Display) -> Status { - Status::internal(format!("tonic-web: {}", e)) + Status::internal(format!("tonic-web: {e}")) } // Key-value pairs encoded as a HTTP/1 headers block (without the terminating newline) @@ -434,9 +434,9 @@ fn decode_trailers_frame(mut buf: Bytes) -> Result, Status> { .unwrap_or(value); let header_key = HeaderName::try_from(key) - .map_err(|e| Status::internal(format!("Unable to parse HeaderName: {}", e)))?; + .map_err(|e| Status::internal(format!("Unable to parse HeaderName: {e}")))?; let header_value = HeaderValue::try_from(value) - .map_err(|e| Status::internal(format!("Unable to parse HeaderValue: {}", e)))?; + .map_err(|e| Status::internal(format!("Unable to parse HeaderValue: {e}")))?; map.insert(header_key, header_value); } @@ -479,8 +479,7 @@ fn find_trailers(buf: &[u8]) -> Result { if !(header == 0 || header == 1) { return Err(Status::internal(format!( - "Invalid header bit {} expected 0 or 1", - header + "Invalid header bit {header} expected 0 or 1" ))); } diff --git a/tonic-web/src/service.rs b/tonic-web/src/service.rs index ab9abc0b8..e0d75f4b3 100644 --- a/tonic-web/src/service.rs +++ b/tonic-web/src/service.rs @@ -359,8 +359,7 @@ mod tests { assert_eq!( res.status(), StatusCode::METHOD_NOT_ALLOWED, - "{} should not be allowed", - method + "{method} should not be allowed" ); } } @@ -445,7 +444,7 @@ mod tests { let mut req = request(); req.headers_mut().insert( CONTENT_TYPE, - HeaderValue::from_maybe_shared(format!("application/{}", variant)).unwrap(), + HeaderValue::from_maybe_shared(format!("application/{variant}")).unwrap(), ); 
let res = svc.call(req).await.unwrap(); diff --git a/tonic/Cargo.toml b/tonic/Cargo.toml index 95f929333..27768924c 100644 --- a/tonic/Cargo.toml +++ b/tonic/Cargo.toml @@ -15,7 +15,7 @@ keywords = ["rpc", "grpc", "async", "futures", "protobuf"] license = "MIT" readme = "../README.md" repository = "https://github.com/hyperium/tonic" -version = "0.13.0" +version = "0.14.0" rust-version = {workspace = true} exclude = ["benches-disabled"] @@ -39,12 +39,12 @@ server = [ "dep:socket2", "dep:tokio", "tokio?/macros", "tokio?/net", "tokio?/time", "tokio-stream/net", - "dep:tower", "tower?/util", "tower?/limit", + "dep:tower", "tower?/util", "tower?/limit", "tower?/load-shed", ] channel = [ "dep:hyper", "hyper?/client", "dep:hyper-util", "hyper-util?/client-legacy", - "dep:tower", "tower?/balance", "tower?/buffer", "tower?/discover", "tower?/limit", "tower?/util", + "dep:tower", "tower?/balance", "tower?/buffer", "tower?/discover", "tower?/limit", "tower?/load-shed", "tower?/util", "dep:tokio", "tokio?/time", "dep:hyper-timeout", ] @@ -69,7 +69,7 @@ tower-service = "0.3" tokio-stream = {version = "0.1.16", default-features = false} # prost -prost = {version = "0.13", default-features = false, features = ["std"], optional = true} +prost = {version = "0.14", default-features = false, features = ["std"], optional = true} # codegen async-trait = {version = "0.1.13", optional = true} @@ -86,7 +86,7 @@ axum = {version = "0.8", default-features = false, optional = true} # rustls rustls-native-certs = { version = "0.8", optional = true } tokio-rustls = { version = "0.26.1", default-features = false, features = ["logging", "tls12"], optional = true } -webpki-roots = { version = "0.26", optional = true } +webpki-roots = { version = "1", optional = true } # compression flate2 = {version = "1.0", optional = true} @@ -94,13 +94,14 @@ zstd = { version = "0.13.0", optional = true } # channel hyper-timeout = {version = "0.5", optional = true} +sync_wrapper = "1.0.2" 
[dev-dependencies] bencher = "0.1.5" quickcheck = "1.0" quickcheck_macros = "1.0" static_assertions = "1.0" -tokio = {version = "1.0", features = ["rt", "macros"]} +tokio = {version = "1.0", features = ["rt-multi-thread", "macros"]} tower = {version = "0.5", features = ["load-shed", "timeout"]} [lints] diff --git a/tonic/LICENSE b/tonic/LICENSE deleted file mode 100644 index 307709840..000000000 --- a/tonic/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Lucio Franco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/tonic/LICENSE b/tonic/LICENSE new file mode 120000 index 000000000..ea5b60640 --- /dev/null +++ b/tonic/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/tonic/benches/decode.rs b/tonic/benches/decode.rs index a6a6e58a9..5ba4dfccc 100644 --- a/tonic/benches/decode.rs +++ b/tonic/benches/decode.rs @@ -73,7 +73,7 @@ impl Body for MockBody { } fn is_end_stream(&self) -> bool { - !self.data.is_empty() + self.data.is_empty() } fn size_hint(&self) -> SizeHint { diff --git a/tonic/src/client/grpc.rs b/tonic/src/client/grpc.rs index cd8a6b7d6..981b72380 100644 --- a/tonic/src/client/grpc.rs +++ b/tonic/src/client/grpc.rs @@ -181,7 +181,7 @@ impl Grpc { /// .await /// .unwrap(); /// - /// // Set the limit to 2MB, Defaults to 4MB. + /// // Set the limit to 2MB, Defaults to `usize::MAX`. /// let limit = 2 * 1024 * 1024; /// let client = TestClient::new(channel).max_encoding_message_size(limit); /// # }; diff --git a/tonic/src/codec/decode.rs b/tonic/src/codec/decode.rs index 4742291fd..a221a5c93 100644 --- a/tonic/src/codec/decode.rs +++ b/tonic/src/codec/decode.rs @@ -11,6 +11,7 @@ use std::{ task::ready, task::{Context, Poll}, }; +use sync_wrapper::SyncWrapper; use tokio_stream::Stream; use tracing::{debug, trace}; @@ -19,12 +20,12 @@ use tracing::{debug, trace}; /// This will wrap some inner [`Body`] and [`Decoder`] and provide an interface /// to fetch the message stream and trailing metadata pub struct Streaming { - decoder: Box + Send + 'static>, + decoder: SyncWrapper + Send + 'static>>, inner: StreamingInner, } struct StreamingInner { - body: Body, + body: SyncWrapper, state: State, direction: Direction, buf: BytesMut, @@ -123,14 +124,14 @@ impl Streaming { { let buffer_size = decoder.buffer_settings().buffer_size; Self { - decoder: Box::new(decoder), + decoder: SyncWrapper::new(Box::new(decoder)), inner: StreamingInner { - body: Body::new( + body: SyncWrapper::new(Body::new( body.map_frame(|frame| { frame.map_data(|mut buf| 
buf.copy_to_bytes(buf.remaining())) }) .map_err(|err| Status::map_error(err.into())), - ), + )), state: State::ReadHeader, direction, buf: BytesMut::with_capacity(buffer_size), @@ -172,11 +173,10 @@ impl StreamingInner { trace!("unexpected compression flag"); let message = if let Direction::Response(status) = self.direction { format!( - "protocol error: received message with invalid compression flag: {} (valid flags are 0 and 1) while receiving response with status: {}", - f, status + "protocol error: received message with invalid compression flag: {f} (valid flags are 0 and 1) while receiving response with status: {status}" ) } else { - format!("protocol error: received message with invalid compression flag: {} (valid flags are 0 and 1), while sending request", f) + format!("protocol error: received message with invalid compression flag: {f} (valid flags are 0 and 1), while sending request") }; return Err(Status::internal(message)); } @@ -189,8 +189,7 @@ impl StreamingInner { if len > limit { return Err(Status::out_of_range( format!( - "Error, decoded message length too large: found {} bytes, the limit is: {} bytes", - len, limit + "Error, decoded message length too large: found {len} bytes, the limit is: {limit} bytes" ), )); } @@ -224,11 +223,10 @@ impl StreamingInner { ) { let message = if let Direction::Response(status) = self.direction { format!( - "Error decompressing: {}, while receiving response with status: {}", - err, status + "Error decompressing: {err}, while receiving response with status: {status}" ) } else { - format!("Error decompressing: {}, while sending request", err) + format!("Error decompressing: {err}, while sending request") }; return Err(Status::internal(message)); } @@ -246,7 +244,7 @@ impl StreamingInner { // Returns Some(()) if data was found or None if the loop in `poll_next` should break fn poll_frame(&mut self, cx: &mut Context<'_>) -> Poll, Status>> { - let frame = match ready!(Pin::new(&mut self.body).poll_frame(cx)) { + let frame 
= match ready!(Pin::new(self.body.get_mut()).poll_frame(cx)) { Some(Ok(frame)) => frame, Some(Err(status)) => { if self.direction == Direction::Request && status.code() == Code::Cancelled { @@ -280,7 +278,7 @@ impl StreamingInner { Ok(None) } else { - panic!("unexpected frame: {:?}", frame); + panic!("unexpected frame: {frame:?}"); }) } @@ -370,8 +368,11 @@ impl Streaming { } fn decode_chunk(&mut self) -> Result, Status> { - match self.inner.decode_chunk(self.decoder.buffer_settings())? { - Some(mut decode_buf) => match self.decoder.decode(&mut decode_buf)? { + match self + .inner + .decode_chunk(self.decoder.get_mut().buffer_settings())? + { + Some(mut decode_buf) => match self.decoder.get_mut().decode(&mut decode_buf)? { Some(msg) => { self.inner.state = State::ReadHeader; Ok(Some(msg)) @@ -416,4 +417,4 @@ impl fmt::Debug for Streaming { } #[cfg(test)] -static_assertions::assert_impl_all!(Streaming<()>: Send); +static_assertions::assert_impl_all!(Streaming<()>: Send, Sync); diff --git a/tonic/src/codec/encode.rs b/tonic/src/codec/encode.rs index 52952ff5b..7568f0515 100644 --- a/tonic/src/codec/encode.rs +++ b/tonic/src/codec/encode.rs @@ -154,7 +154,7 @@ where encoder .encode(item, &mut EncodeBuf::new(uncompression_buf)) - .map_err(|err| Status::internal(format!("Error encoding: {}", err)))?; + .map_err(|err| Status::internal(format!("Error encoding: {err}")))?; let uncompressed_len = uncompression_buf.len(); @@ -167,11 +167,11 @@ where buf, uncompressed_len, ) - .map_err(|err| Status::internal(format!("Error compressing: {}", err)))?; + .map_err(|err| Status::internal(format!("Error compressing: {err}")))?; } else { encoder .encode(item, &mut EncodeBuf::new(buf)) - .map_err(|err| Status::internal(format!("Error encoding: {}", err)))?; + .map_err(|err| Status::internal(format!("Error encoding: {err}")))?; } // now that we know length, we can write the header @@ -187,8 +187,7 @@ fn finish_encoding( let limit = 
max_message_size.unwrap_or(DEFAULT_MAX_SEND_MESSAGE_SIZE); if len > limit { return Err(Status::out_of_range(format!( - "Error, encoded message length too large: found {} bytes, the limit is: {} bytes", - len, limit + "Error, encoded message length too large: found {len} bytes, the limit is: {limit} bytes" ))); } diff --git a/tonic/src/metadata/encoding.rs b/tonic/src/metadata/encoding.rs index cf2c38a53..46eae8ee2 100644 --- a/tonic/src/metadata/encoding.rs +++ b/tonic/src/metadata/encoding.rs @@ -139,7 +139,7 @@ impl self::value_encoding::Sealed for Binary { fn from_static(value: &'static str) -> HeaderValue { if crate::util::base64::STANDARD.decode(value).is_err() { - panic!("Invalid base64 passed to from_static: {}", value); + panic!("Invalid base64 passed to from_static: {value}"); } unsafe { // Because this is valid base64 this must be a valid HTTP header value, @@ -173,9 +173,9 @@ impl self::value_encoding::Sealed for Binary { fn fmt(value: &HeaderValue, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Ok(decoded) = Self::decode(value.as_bytes()) { - write!(f, "{:?}", decoded) + write!(f, "{decoded:?}") } else { - write!(f, "b[invalid]{:?}", value) + write!(f, "b[invalid]{value:?}") } } } diff --git a/tonic/src/metadata/map.rs b/tonic/src/metadata/map.rs index dc6d4d39e..8e08421c3 100644 --- a/tonic/src/metadata/map.rs +++ b/tonic/src/metadata/map.rs @@ -214,9 +214,8 @@ pub(crate) const GRPC_TIMEOUT_HEADER: &str = "grpc-timeout"; impl MetadataMap { // Headers reserved by the gRPC protocol. 
- pub(crate) const GRPC_RESERVED_HEADERS: [HeaderName; 6] = [ + pub(crate) const GRPC_RESERVED_HEADERS: [HeaderName; 5] = [ HeaderName::from_static("te"), - HeaderName::from_static("user-agent"), HeaderName::from_static("content-type"), HeaderName::from_static("grpc-message"), HeaderName::from_static("grpc-message-type"), diff --git a/tonic/src/metadata/value.rs b/tonic/src/metadata/value.rs index eb9eeda1c..46086279a 100644 --- a/tonic/src/metadata/value.rs +++ b/tonic/src/metadata/value.rs @@ -795,13 +795,13 @@ fn test_debug() { for &(value, expected) in cases { let val = AsciiMetadataValue::try_from(value.as_bytes()).unwrap(); - let actual = format!("{:?}", val); + let actual = format!("{val:?}"); assert_eq!(expected, actual); } let mut sensitive = AsciiMetadataValue::from_static("password"); sensitive.set_sensitive(true); - assert_eq!("Sensitive", format!("{:?}", sensitive)); + assert_eq!("Sensitive", format!("{sensitive:?}")); } #[test] diff --git a/tonic/src/request.rs b/tonic/src/request.rs index 0f54f9785..d62563661 100644 --- a/tonic/src/request.rs +++ b/tonic/src/request.rs @@ -408,7 +408,7 @@ fn duration_to_grpc_timeout(duration: Duration) -> String { if value > max_size { None } else { - Some(format!("{}{}", value, unit)) + Some(format!("{value}{unit}")) } } @@ -462,6 +462,25 @@ mod tests { assert!(http_request.headers().is_empty()); } + #[test] + fn preserves_user_agent() { + let mut r = Request::new(1); + + r.metadata_mut().insert( + MetadataKey::from_static("user-agent"), + MetadataValue::from_static("Custom/1.2.3"), + ); + + let http_request = r.into_http( + Uri::default(), + http::Method::POST, + http::Version::HTTP_2, + SanitizeHeaders::Yes, + ); + let user_agent = http_request.headers().get("user-agent").unwrap(); + assert_eq!(user_agent, "Custom/1.2.3"); + } + #[test] fn duration_to_grpc_timeout_less_than_second() { let timeout = Duration::from_millis(500); diff --git a/tonic/src/status.rs b/tonic/src/status.rs index 057e7a542..ab12fd613 100644 
--- a/tonic/src/status.rs +++ b/tonic/src/status.rs @@ -35,7 +35,11 @@ const ENCODING_SET: &AsciiSet = &CONTROLS /// assert_eq!(status1.code(), status2.code()); /// ``` #[derive(Clone)] -pub struct Status { +pub struct Status(Box); + +/// Box the contents of Status to avoid large error variants +#[derive(Clone)] +struct StatusInner { /// The gRPC status code, found in the `grpc-status` header. code: Code, /// A relevant error message, found in the `grpc-message` header. @@ -50,6 +54,12 @@ pub struct Status { source: Option>, } +impl StatusInner { + fn into_status(self) -> Status { + Status(Box::new(self)) + } +} + /// gRPC status codes used by [`Status`]. /// /// These variants match the [gRPC status codes]. @@ -160,13 +170,14 @@ impl std::fmt::Display for Code { impl Status { /// Create a new `Status` with the associated code and message. pub fn new(code: Code, message: impl Into) -> Status { - Status { + StatusInner { code, message: message.into(), details: Bytes::new(), metadata: MetadataMap::new(), source: None, } + .into_status() } /// The operation completed successfully. @@ -318,7 +329,7 @@ impl Status { pub fn from_error(err: Box) -> Status { Status::try_from_error(err).unwrap_or_else(|err| { let mut status = Status::new(Code::Unknown, err.to_string()); - status.source = Some(err.into()); + status.0.source = Some(err.into()); status }) } @@ -348,8 +359,20 @@ impl Status { Err(err) => err, }; + // If the load shed middleware is enabled, respond to + // service overloaded with an appropriate grpc status. 
+ #[cfg(feature = "server")] + let err = match err.downcast::() { + Ok(_) => { + return Ok(Status::resource_exhausted( + "Too many active requests for the connection", + )); + } + Err(err) => err, + }; + if let Some(mut status) = find_status_in_source_chain(&*err) { - status.source = Some(err.into()); + status.0.source = Some(err.into()); return Ok(status); } @@ -361,8 +384,8 @@ impl Status { fn from_h2_error(err: Box) -> Status { let code = Self::code_from_h2(&err); - let mut status = Self::new(code, format!("h2 protocol error: {}", err)); - status.source = Some(Arc::new(*err)); + let mut status = Self::new(code, format!("h2 protocol error: {err}")); + status.0.source = Some(Arc::new(*err)); status } @@ -389,7 +412,7 @@ impl Status { #[cfg(feature = "server")] fn to_h2_error(&self) -> h2::Error { // conservatively transform to h2 error codes... - let reason = match self.code { + let reason = match self.code() { Code::Cancelled => h2::Reason::CANCEL, _ => h2::Reason::INTERNAL_ERROR, }; @@ -421,7 +444,7 @@ impl Status { #[cfg(feature = "server")] if let Some(h2_err) = err.source().and_then(|e| e.downcast_ref::()) { let code = Status::code_from_h2(h2_err); - let status = Self::new(code, format!("h2 protocol error: {}", err)); + let status = Self::new(code, format!("h2 protocol error: {err}")); return Some(status); } @@ -473,53 +496,56 @@ impl Status { } }; - Some(Status { - code, - message, - details, - metadata: MetadataMap::from_headers(other_headers), - source: None, - }) + Some( + StatusInner { + code, + message, + details, + metadata: MetadataMap::from_headers(other_headers), + source: None, + } + .into_status(), + ) } /// Get the gRPC `Code` of this `Status`. pub fn code(&self) -> Code { - self.code + self.0.code } /// Get the text error message of this `Status`. pub fn message(&self) -> &str { - &self.message + &self.0.message } /// Get the opaque error details of this `Status`. 
pub fn details(&self) -> &[u8] { - &self.details + &self.0.details } /// Get a reference to the custom metadata. pub fn metadata(&self) -> &MetadataMap { - &self.metadata + &self.0.metadata } /// Get a mutable reference to the custom metadata. pub fn metadata_mut(&mut self) -> &mut MetadataMap { - &mut self.metadata + &mut self.0.metadata } pub(crate) fn to_header_map(&self) -> Result { - let mut header_map = HeaderMap::with_capacity(3 + self.metadata.len()); + let mut header_map = HeaderMap::with_capacity(3 + self.0.metadata.len()); self.add_header(&mut header_map)?; Ok(header_map) } /// Add headers from this `Status` into `header_map`. pub fn add_header(&self, header_map: &mut HeaderMap) -> Result<(), Self> { - header_map.extend(self.metadata.clone().into_sanitized_headers()); + header_map.extend(self.0.metadata.clone().into_sanitized_headers()); - header_map.insert(Self::GRPC_STATUS, self.code.to_header_value()); + header_map.insert(Self::GRPC_STATUS, self.0.code.to_header_value()); - if !self.message.is_empty() { + if !self.0.message.is_empty() { let to_write = Bytes::copy_from_slice( Cow::from(percent_encode(self.message().as_bytes(), ENCODING_SET)).as_bytes(), ); @@ -530,8 +556,8 @@ impl Status { ); } - if !self.details.is_empty() { - let details = crate::util::base64::STANDARD_NO_PAD.encode(&self.details[..]); + if !self.0.details.is_empty() { + let details = crate::util::base64::STANDARD_NO_PAD.encode(&self.0.details[..]); header_map.insert( Self::GRPC_STATUS_DETAILS, @@ -559,18 +585,19 @@ impl Status { details: Bytes, metadata: MetadataMap, ) -> Status { - Status { + StatusInner { code, message: message.into(), details, metadata, source: None, } + .into_status() } /// Add a source error to this status. 
pub fn set_source(&mut self, source: Arc) -> &mut Status { - self.source = Some(source); + self.0.source = Some(source); self } @@ -598,15 +625,18 @@ fn find_status_in_source_chain(err: &(dyn Error + 'static)) -> Option { while let Some(err) = source { if let Some(status) = err.downcast_ref::() { - return Some(Status { - code: status.code, - message: status.message.clone(), - details: status.details.clone(), - metadata: status.metadata.clone(), - // Since `Status` is not `Clone`, any `source` on the original Status - // cannot be cloned so must remain with the original `Status`. - source: None, - }); + return Some( + StatusInner { + code: status.0.code, + message: status.0.message.clone(), + details: status.0.details.clone(), + metadata: status.0.metadata.clone(), + // Since `Status` is not `Clone`, any `source` on the original Status + // cannot be cloned so must remain with the original `Status`. + source: None, + } + .into_status(), + ); } if let Some(timeout) = err.downcast_ref::() { @@ -637,6 +667,12 @@ fn find_status_in_source_chain(err: &(dyn Error + 'static)) -> Option { } impl fmt::Debug for Status { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Debug for StatusInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // A manual impl to reduce the noise of frequently empty fields. 
let mut builder = f.debug_struct("Status"); @@ -725,7 +761,7 @@ impl fmt::Display for Status { impl Error for Status { fn source(&self) -> Option<&(dyn Error + 'static)> { - self.source.as_ref().map(|err| (&**err) as _) + self.0.source.as_ref().map(|err| (&**err) as _) } } diff --git a/tonic/src/transport/channel/endpoint.rs b/tonic/src/transport/channel/endpoint.rs index 3a919e93d..f5c386899 100644 --- a/tonic/src/transport/channel/endpoint.rs +++ b/tonic/src/transport/channel/endpoint.rs @@ -39,6 +39,8 @@ pub struct Endpoint { pub(crate) init_stream_window_size: Option, pub(crate) init_connection_window_size: Option, pub(crate) tcp_keepalive: Option, + pub(crate) tcp_keepalive_interval: Option, + pub(crate) tcp_keepalive_retries: Option, pub(crate) tcp_nodelay: bool, pub(crate) http2_keep_alive_interval: Option, pub(crate) http2_keep_alive_timeout: Option, @@ -62,7 +64,7 @@ impl Endpoint { let me = dst.try_into().map_err(|e| Error::from_source(e.into()))?; #[cfg(feature = "_tls-any")] if let EndpointType::Uri(uri) = &me.uri { - if uri.scheme() == Some(&http::uri::Scheme::HTTPS) { + if me.tls.is_none() && uri.scheme() == Some(&http::uri::Scheme::HTTPS) { return me.tls_config(ClientTlsConfig::new().with_enabled_roots()); } } @@ -84,6 +86,8 @@ impl Endpoint { init_stream_window_size: None, init_connection_window_size: None, tcp_keepalive: None, + tcp_keepalive_interval: None, + tcp_keepalive_retries: None, tcp_nodelay: true, http2_keep_alive_interval: None, http2_keep_alive_timeout: None, @@ -111,6 +115,8 @@ impl Endpoint { init_stream_window_size: None, init_connection_window_size: None, tcp_keepalive: None, + tcp_keepalive_interval: None, + tcp_keepalive_retries: None, tcp_nodelay: true, http2_keep_alive_interval: None, http2_keep_alive_timeout: None, @@ -258,7 +264,6 @@ impl Endpoint { /// probes. 
/// /// Default is no keepalive (`None`) - /// pub fn tcp_keepalive(self, tcp_keepalive: Option) -> Self { Endpoint { tcp_keepalive, @@ -266,6 +271,31 @@ impl Endpoint { } } + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + /// + /// This is only used if `tcp_keepalive` is not None. + /// + /// Defaults to None, which is the system default. + pub fn tcp_keepalive_interval(self, tcp_keepalive_interval: Option) -> Self { + Endpoint { + tcp_keepalive_interval, + ..self + } + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. + /// + /// This is only used if `tcp_keepalive` is not None. + /// + /// Defaults to None, which is the system default. + pub fn tcp_keepalive_retries(self, tcp_keepalive_retries: Option) -> Self { + Endpoint { + tcp_keepalive_retries, + ..self + } + } + /// Apply a concurrency limit to each request. /// /// ``` @@ -428,6 +458,8 @@ impl Endpoint { http.enforce_http(false); http.set_nodelay(self.tcp_nodelay); http.set_keepalive(self.tcp_keepalive); + http.set_keepalive_interval(self.tcp_keepalive_interval); + http.set_keepalive_retries(self.tcp_keepalive_retries); http.set_connect_timeout(self.connect_timeout); http.set_local_address(self.local_address); self.connector(http) @@ -543,6 +575,16 @@ impl Endpoint { pub fn get_tcp_keepalive(&self) -> Option { self.tcp_keepalive } + + /// Get whether TCP keepalive interval. + pub fn get_tcp_keepalive_interval(&self) -> Option { + self.tcp_keepalive_interval + } + + /// Get whether TCP keepalive retries. 
+ pub fn get_tcp_keepalive_retries(&self) -> Option { + self.tcp_keepalive_retries + } } impl From for Endpoint { diff --git a/tonic/src/transport/channel/service/tls.rs b/tonic/src/transport/channel/service/tls.rs index 7510099a1..c3de308be 100644 --- a/tonic/src/transport/channel/service/tls.rs +++ b/tonic/src/transport/channel/service/tls.rs @@ -1,8 +1,9 @@ use std::fmt; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use hyper_util::rt::TokioIo; use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::time; use tokio_rustls::{ rustls::{ crypto, @@ -23,6 +24,7 @@ pub(crate) struct TlsConnector { config: Arc, domain: Arc>, assume_http2: bool, + timeout: Option, } impl TlsConnector { @@ -34,6 +36,7 @@ impl TlsConnector { domain: &str, assume_http2: bool, use_key_log: bool, + timeout: Option, #[cfg(feature = "tls-native-roots")] with_native_roots: bool, #[cfg(feature = "tls-webpki-roots")] with_webpki_roots: bool, ) -> Result { @@ -98,6 +101,7 @@ impl TlsConnector { config: Arc::new(config), domain: Arc::new(ServerName::try_from(domain)?.to_owned()), assume_http2, + timeout, }) } @@ -105,9 +109,14 @@ impl TlsConnector { where I: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - let io = RustlsConnector::from(self.config.clone()) - .connect(self.domain.as_ref().to_owned(), io) - .await?; + let conn_fut = + RustlsConnector::from(self.config.clone()).connect(self.domain.as_ref().to_owned(), io); + let io = match self.timeout { + Some(timeout) => time::timeout(timeout, conn_fut) + .await + .map_err(|_| TlsError::HandshakeTimeout)?, + None => conn_fut.await, + }?; // Generally we require ALPN to be negotiated, but if the user has // explicitly set `assume_http2` to true, we'll allow it to be missing. 
diff --git a/tonic/src/transport/channel/service/user_agent.rs b/tonic/src/transport/channel/service/user_agent.rs index bbcc1caca..8217d55e6 100644 --- a/tonic/src/transport/channel/service/user_agent.rs +++ b/tonic/src/transport/channel/service/user_agent.rs @@ -77,7 +77,7 @@ mod tests { fn prepends_custom_user_agent_to_default() { assert_eq!( UserAgent::new(Svc, Some(HeaderValue::from_static("Greeter 1.1"))).user_agent, - HeaderValue::from_str(&format!("Greeter 1.1 {}", TONIC_USER_AGENT)).unwrap() + HeaderValue::from_str(&format!("Greeter 1.1 {TONIC_USER_AGENT}")).unwrap() ) } @@ -115,7 +115,7 @@ mod tests { #[tokio::test] async fn sets_custom_user_agent_if_none_present() { - let expected_user_agent = format!("Greeter 1.1 {}", TONIC_USER_AGENT); + let expected_user_agent = format!("Greeter 1.1 {TONIC_USER_AGENT}"); let mut ua = UserAgent::new( TestSvc { expected_user_agent, @@ -131,7 +131,7 @@ mod tests { req.headers_mut() .insert(USER_AGENT, HeaderValue::from_static("request-ua/x.y")); - let expected_user_agent = format!("request-ua/x.y {}", TONIC_USER_AGENT); + let expected_user_agent = format!("request-ua/x.y {TONIC_USER_AGENT}"); let mut ua = UserAgent::new( TestSvc { expected_user_agent, @@ -147,7 +147,7 @@ mod tests { req.headers_mut() .insert(USER_AGENT, HeaderValue::from_static("request-ua/x.y")); - let expected_user_agent = format!("request-ua/x.y Greeter 1.1 {}", TONIC_USER_AGENT); + let expected_user_agent = format!("request-ua/x.y Greeter 1.1 {TONIC_USER_AGENT}"); let mut ua = UserAgent::new( TestSvc { expected_user_agent, diff --git a/tonic/src/transport/channel/tls.rs b/tonic/src/transport/channel/tls.rs index 945384fd2..794ec1f9d 100644 --- a/tonic/src/transport/channel/tls.rs +++ b/tonic/src/transport/channel/tls.rs @@ -4,6 +4,7 @@ use crate::transport::{ Error, }; use http::Uri; +use std::time::Duration; use tokio_rustls::rustls::pki_types::TrustAnchor; /// Configures TLS settings for endpoints. 
@@ -19,6 +20,7 @@ pub struct ClientTlsConfig { #[cfg(feature = "tls-webpki-roots")] with_webpki_roots: bool, use_key_log: bool, + timeout: Option, } impl ClientTlsConfig { @@ -113,14 +115,24 @@ impl ClientTlsConfig { /// Activates all TLS roots enabled through `tls-*-roots` feature flags pub fn with_enabled_roots(self) -> Self { - let config = ClientTlsConfig::new(); + let config = self; + #[cfg(feature = "tls-native-roots")] let config = config.with_native_roots(); #[cfg(feature = "tls-webpki-roots")] let config = config.with_webpki_roots(); + config } + /// Sets the timeout for the TLS handshake. + pub fn timeout(self, timeout: Duration) -> Self { + ClientTlsConfig { + timeout: Some(timeout), + ..self + } + } + pub(crate) fn into_tls_connector(self, uri: &Uri) -> Result { let domain = match &self.domain { Some(domain) => domain, @@ -133,6 +145,7 @@ impl ClientTlsConfig { domain, self.assume_http2, self.use_key_log, + self.timeout, #[cfg(feature = "tls-native-roots")] self.with_native_roots, #[cfg(feature = "tls-webpki-roots")] diff --git a/tonic/src/transport/server/incoming.rs b/tonic/src/transport/server/incoming.rs index fb63d7480..3e03ce782 100644 --- a/tonic/src/transport/server/incoming.rs +++ b/tonic/src/transport/server/incoming.rs @@ -19,6 +19,9 @@ pub struct TcpIncoming { inner: TcpListenerStream, nodelay: Option, keepalive: Option, + keepalive_time: Option, + keepalive_interval: Option, + keepalive_retries: Option, } impl TcpIncoming { @@ -66,9 +69,42 @@ impl TcpIncoming { } /// Sets the `TCP_KEEPALIVE` option on the accepted connection. 
- pub fn with_keepalive(self, keepalive: Option) -> Self { - let keepalive = keepalive.map(|t| TcpKeepalive::new().with_time(t)); - Self { keepalive, ..self } + pub fn with_keepalive(self, keepalive_time: Option) -> Self { + Self { + keepalive_time, + keepalive: make_keepalive( + keepalive_time, + self.keepalive_interval, + self.keepalive_retries, + ), + ..self + } + } + + /// Sets the `TCP_KEEPINTVL` option on the accepted connection. + pub fn with_keepalive_interval(self, keepalive_interval: Option) -> Self { + Self { + keepalive_interval, + keepalive: make_keepalive( + self.keepalive_time, + keepalive_interval, + self.keepalive_retries, + ), + ..self + } + } + + /// Sets the `TCP_KEEPCNT` option on the accepted connection. + pub fn with_keepalive_retries(self, keepalive_retries: Option) -> Self { + Self { + keepalive_retries, + keepalive: make_keepalive( + self.keepalive_time, + self.keepalive_interval, + keepalive_retries, + ), + ..self + } } /// Returns the local address that this tcp incoming is bound to. 
@@ -83,6 +119,9 @@ impl From for TcpIncoming { inner: TcpListenerStream::new(listener), nodelay: None, keepalive: None, + keepalive_time: None, + keepalive_interval: None, + keepalive_retries: None, } } } @@ -121,6 +160,70 @@ fn set_accepted_socket_options( } } +fn make_keepalive( + keepalive_time: Option, + keepalive_interval: Option, + keepalive_retries: Option, +) -> Option { + let mut dirty = false; + let mut keepalive = TcpKeepalive::new(); + if let Some(t) = keepalive_time { + keepalive = keepalive.with_time(t); + dirty = true; + } + + #[cfg( + // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525 + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + target_os = "windows", + ) + )] + if let Some(t) = keepalive_interval { + keepalive = keepalive.with_interval(t); + dirty = true; + } + + #[cfg( + // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570 + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + ) + )] + if let Some(r) = keepalive_retries { + keepalive = keepalive.with_retries(r); + dirty = true; + } + + // avoid clippy errors for targets that do not use these fields. 
+ let _ = keepalive_retries; + let _ = keepalive_interval; + + dirty.then_some(keepalive) +} + #[cfg(test)] mod tests { use crate::transport::server::TcpIncoming; diff --git a/tonic/src/transport/server/mod.rs b/tonic/src/transport/server/mod.rs index ec4a884b8..ee978baa3 100644 --- a/tonic/src/transport/server/mod.rs +++ b/tonic/src/transport/server/mod.rs @@ -66,6 +66,7 @@ use tower::{ layer::util::{Identity, Stack}, layer::Layer, limit::concurrency::ConcurrencyLimitLayer, + load_shed::LoadShedLayer, util::BoxCloneService, Service, ServiceBuilder, ServiceExt, }; @@ -87,6 +88,7 @@ const DEFAULT_HTTP2_KEEPALIVE_TIMEOUT: Duration = Duration::from_secs(20); pub struct Server { trace_interceptor: Option, concurrency_limit: Option, + load_shed: bool, timeout: Option, #[cfg(feature = "_tls-any")] tls: Option, @@ -111,6 +113,7 @@ impl Default for Server { Self { trace_interceptor: None, concurrency_limit: None, + load_shed: false, timeout: None, #[cfg(feature = "_tls-any")] tls: None, @@ -179,6 +182,27 @@ impl Server { } } + /// Enable or disable load shedding. The default is disabled. + /// + /// When load shedding is enabled, if the service responds with not ready + /// the request will immediately be rejected with a + /// [`resource_exhausted`](https://docs.rs/tonic/latest/tonic/struct.Status.html#method.resource_exhausted) error. + /// The default is to buffer requests. This is especially useful in combination with + /// setting a concurrency limit per connection. + /// + /// # Example + /// + /// ``` + /// # use tonic::transport::Server; + /// # use tower_service::Service; + /// # let builder = Server::builder(); + /// builder.load_shed(true); + /// ``` + #[must_use] + pub fn load_shed(self, load_shed: bool) -> Self { + Server { load_shed, ..self } + } + /// Set a timeout on for all request handlers. /// /// # Example @@ -320,6 +344,8 @@ impl Server { /// specified will be the time to remain idle before sending TCP keepalive /// probes. 
/// + /// Important: This setting is only respected when not using `serve_with_incoming`. + /// /// Default is no keepalive (`None`) /// #[must_use] @@ -514,6 +540,7 @@ impl Server { service_builder: self.service_builder.layer(new_layer), trace_interceptor: self.trace_interceptor, concurrency_limit: self.concurrency_limit, + load_shed: self.load_shed, timeout: self.timeout, #[cfg(feature = "_tls-any")] tls: self.tls, @@ -643,6 +670,7 @@ impl Server { { let trace_interceptor = self.trace_interceptor.clone(); let concurrency_limit = self.concurrency_limit; + let load_shed = self.load_shed; let init_connection_window_size = self.init_connection_window_size; let init_stream_window_size = self.init_stream_window_size; let max_concurrent_streams = self.max_concurrent_streams; @@ -667,6 +695,7 @@ impl Server { let mut svc = MakeSvc { inner: svc, concurrency_limit, + load_shed, timeout, trace_interceptor, _io: PhantomData, @@ -1047,6 +1076,7 @@ impl fmt::Debug for Svc { #[derive(Clone)] struct MakeSvc { concurrency_limit: Option, + load_shed: bool, timeout: Option, inner: S, trace_interceptor: Option, @@ -1080,6 +1110,7 @@ where let svc = ServiceBuilder::new() .layer(RecoverErrorLayer::new()) + .option_layer(self.load_shed.then_some(LoadShedLayer::new())) .option_layer(concurrency_limit.map(ConcurrencyLimitLayer::new)) .layer_fn(|s| GrpcTimeout::new(s, timeout)) .service(svc); diff --git a/tonic/src/transport/server/service/tls.rs b/tonic/src/transport/server/service/tls.rs index 42c376249..224be98a4 100644 --- a/tonic/src/transport/server/service/tls.rs +++ b/tonic/src/transport/server/service/tls.rs @@ -1,6 +1,7 @@ -use std::{fmt, sync::Arc}; +use std::{fmt, sync::Arc, time::Duration}; use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::time; use tokio_rustls::{ rustls::{server::WebPkiClientVerifier, RootCertStore, ServerConfig}, server::TlsStream, @@ -8,13 +9,16 @@ use tokio_rustls::{ }; use crate::transport::{ - service::tls::{convert_certificate_to_pki_types, 
convert_identity_to_pki_types, ALPN_H2}, + service::tls::{ + convert_certificate_to_pki_types, convert_identity_to_pki_types, TlsError, ALPN_H2, + }, Certificate, Identity, }; #[derive(Clone)] pub(crate) struct TlsAcceptor { inner: Arc, + timeout: Option, } impl TlsAcceptor { @@ -24,6 +28,7 @@ impl TlsAcceptor { client_auth_optional: bool, ignore_client_order: bool, use_key_log: bool, + timeout: Option, ) -> Result { let builder = ServerConfig::builder(); @@ -53,6 +58,7 @@ impl TlsAcceptor { config.alpn_protocols.push(ALPN_H2.into()); Ok(Self { inner: Arc::new(config), + timeout, }) } @@ -61,7 +67,14 @@ impl TlsAcceptor { IO: AsyncRead + AsyncWrite + Unpin, { let acceptor = RustlsAcceptor::from(self.inner.clone()); - acceptor.accept(io).await.map_err(Into::into) + let accept_fut = acceptor.accept(io); + match self.timeout { + Some(timeout) => time::timeout(timeout, accept_fut) + .await + .map_err(|_| TlsError::HandshakeTimeout)?, + None => accept_fut.await, + } + .map_err(Into::into) } } diff --git a/tonic/src/transport/server/tls.rs b/tonic/src/transport/server/tls.rs index 2e1ef8213..969364e1f 100644 --- a/tonic/src/transport/server/tls.rs +++ b/tonic/src/transport/server/tls.rs @@ -1,4 +1,4 @@ -use std::fmt; +use std::{fmt, time::Duration}; use super::service::TlsAcceptor; use crate::transport::tls::{Certificate, Identity}; @@ -11,6 +11,7 @@ pub struct ServerTlsConfig { client_auth_optional: bool, ignore_client_order: bool, use_key_log: bool, + timeout: Option, } impl fmt::Debug for ServerTlsConfig { @@ -73,6 +74,14 @@ impl ServerTlsConfig { } } + /// Sets the timeout for the TLS handshake. 
+ pub fn timeout(self, timeout: Duration) -> Self { + ServerTlsConfig { + timeout: Some(timeout), + ..self + } + } + pub(crate) fn tls_acceptor(&self) -> Result { TlsAcceptor::new( self.identity.as_ref().unwrap(), @@ -80,6 +89,7 @@ impl ServerTlsConfig { self.client_auth_optional, self.ignore_client_order, self.use_key_log, + self.timeout, ) } } diff --git a/tonic/src/transport/service/tls.rs b/tonic/src/transport/service/tls.rs index 8cb30c73c..0d6e9bc87 100644 --- a/tonic/src/transport/service/tls.rs +++ b/tonic/src/transport/service/tls.rs @@ -15,6 +15,7 @@ pub(crate) enum TlsError { NativeCertsNotFound, CertificateParseError, PrivateKeyParseError, + HandshakeTimeout, } impl fmt::Display for TlsError { @@ -29,6 +30,7 @@ impl fmt::Display for TlsError { f, "Error parsing TLS private key - no RSA or PKCS8-encoded keys found." ), + TlsError::HandshakeTimeout => write!(f, "TLS handshake timeout."), } } }