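//! Tests for how the status index handles `HTTPLocalRateLimitPolicy`
//! resources.
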
use crate::{
    index::{accepted, no_matching_target, ratelimit_already_exists, SharedIndex, Update},
    resource_id::NamespaceGroupKindName,
    tests::{default_cluster_networks, make_server},
    Index, IndexMetrics,
};
use chrono::{DateTime, Utc};
use kubert::index::IndexNamespacedResource;
use linkerd_policy_controller_core::routes::GroupKindName;
use linkerd_policy_controller_k8s_api::{
    self as k8s_core_api,
    policy::{self as linkerd_k8s_api},
    Resource,
};
use std::sync::Arc;
use tokio::sync::{
    mpsc::{self, Receiver},
    watch,
};

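// A rate limit policy that targets an existing Server should be accepted:
// the index should emit exactly one status patch with an "accepted" condition.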
#[test]
fn ratelimit_accepted() {
    let (index, mut updates_rx) = make_index_updates_rx();

    // create server
    let server = make_server(
        "ns",
        "server-1",
        8080,
        vec![("app", "server")],
        vec![],
        None,
    );
    index.write().apply(server);

    // create an associated rate limit
    let (ratelimit_id, ratelimit) = make_ratelimit("rl-1".to_string(), "server-1".to_string());
    index.write().apply(ratelimit);

    let expected_status = linkerd_k8s_api::HTTPLocalRateLimitPolicyStatus {
        conditions: vec![accepted()],
        target_ref: linkerd_k8s_api::LocalTargetRef {
            group: Some("policy.linkerd.io".to_string()),
            kind: "Server".to_string(),
            name: "server-1".to_string(),
        },
    };

    let expected_patch = crate::index::make_patch(&ratelimit_id, expected_status).unwrap();

    let update = updates_rx.try_recv().unwrap();
    assert_eq!(ratelimit_id, update.id);
    assert_eq!(expected_patch, update.patch);
    assert!(updates_rx.try_recv().is_err());
}

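// A rate limit policy that targets a Server that does not exist should be
// rejected: the status patch should carry a "no matching target" condition.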
#[test]
fn ratelimit_not_accepted_no_matching_target() {
    let (index, mut updates_rx) = make_index_updates_rx();

    // create server
    let server = make_server(
        "ns",
        "server-1",
        8080,
        vec![("app", "server")],
        vec![],
        None,
    );
    index.write().apply(server);

    // create an associated rate limit
    let (ratelimit_id, ratelimit) = make_ratelimit("rl-1".to_string(), "server-2".to_string());
    index.write().apply(ratelimit);

    let expected_status = linkerd_k8s_api::HTTPLocalRateLimitPolicyStatus {
        conditions: vec![no_matching_target()],
        target_ref: linkerd_k8s_api::LocalTargetRef {
            group: Some("policy.linkerd.io".to_string()),
            kind: "Server".to_string(),
            name: "server-2".to_string(),
        },
    };

    let expected_patch = crate::index::make_patch(&ratelimit_id, expected_status).unwrap();

    let update = updates_rx.try_recv().unwrap();
    assert_eq!(ratelimit_id, update.id);
    assert_eq!(expected_patch, update.patch);
    assert!(updates_rx.try_recv().is_err());
}

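// Only one rate limit policy may target a given Server: the first policy is
// accepted, but a second policy targeting the same Server should be rejected
// with an "already exists" condition.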
#[test]
fn ratelimit_not_accepted_already_exists() {
    let (index, mut updates_rx) = make_index_updates_rx();

    // create server
    let server = make_server(
        "ns",
        "server-1",
        8080,
        vec![("app", "server")],
        vec![],
        None,
    );
    index.write().apply(server);

    // create an associated rate limit
    let (rl_1_id, rl_1) = make_ratelimit("rl-1".to_string(), "server-1".to_string());
    index.write().apply(rl_1);

    let expected_status = linkerd_k8s_api::HTTPLocalRateLimitPolicyStatus {
        conditions: vec![accepted()],
        target_ref: linkerd_k8s_api::LocalTargetRef {
            group: Some("policy.linkerd.io".to_string()),
            kind: "Server".to_string(),
            name: "server-1".to_string(),
        },
    };

    let rl_1_expected_patch = crate::index::make_patch(&rl_1_id, expected_status).unwrap();

    let update = updates_rx.try_recv().unwrap();
    assert_eq!(rl_1_id, update.id);
    assert_eq!(rl_1_expected_patch, update.patch);
    assert!(updates_rx.try_recv().is_err());

    // create another rate limit for the same server
    let (rl_2_id, rl_2) = make_ratelimit("rl-2".to_string(), "server-1".to_string());
    index.write().apply(rl_2);

    let expected_status = linkerd_k8s_api::HTTPLocalRateLimitPolicyStatus {
        conditions: vec![ratelimit_already_exists()],
        target_ref: linkerd_k8s_api::LocalTargetRef {
            group: Some("policy.linkerd.io".to_string()),
            kind: "Server".to_string(),
            name: "server-1".to_string(),
        },
    };

    let rl_2_expected_patch = crate::index::make_patch(&rl_2_id, expected_status).unwrap();

    let update_1 = updates_rx.try_recv().unwrap();
    let update_2 = updates_rx.try_recv().unwrap();
    assert!(updates_rx.try_recv().is_err());

    // we should receive updates for both rate limits in any order
    if update_1.id == rl_1_id {
        assert_eq!(rl_1_id, update_1.id);
        assert_eq!(rl_1_expected_patch, update_1.patch);
        assert_eq!(rl_2_id, update_2.id);
        assert_eq!(rl_2_expected_patch, update_2.patch);
    } else {
        assert_eq!(rl_1_id, update_2.id);
        assert_eq!(rl_1_expected_patch, update_2.patch);
        assert_eq!(rl_2_id, update_1.id);
        assert_eq!(rl_2_expected_patch, update_1.patch);
    }
}

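// Builds a shared index and the receiving end of its status-updates channel.
// The lease claim is held by this instance and never expires, so the index
// should always consider itself the leader and publish status patches.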
fn make_index_updates_rx() -> (SharedIndex, Receiver<Update>) {
    let hostname = "test";
    let claim = kubert::lease::Claim {
        holder: "test".to_string(),
        expiry: DateTime::<Utc>::MAX_UTC,
    };
    let (_claims_tx, claims_rx) = watch::channel(Arc::new(claim));
    let (updates_tx, updates_rx) = mpsc::channel(10000);
    let index = Index::shared(
        hostname,
        claims_rx,
        updates_tx,
        IndexMetrics::register(&mut Default::default()),
        default_cluster_networks(),
    );

    (index, updates_rx)
}

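// Builds an `HTTPLocalRateLimitPolicy` named `name` in namespace "ns" that
// targets the Server named `server`, along with the id used to key its
// status updates.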
fn make_ratelimit(
    name: String,
    server: String,
) -> (
    NamespaceGroupKindName,
    linkerd_k8s_api::HTTPLocalRateLimitPolicy,
) {
    let ratelimit_id = NamespaceGroupKindName {
        namespace: "ns".to_string(),
        gkn: GroupKindName {
            group: linkerd_k8s_api::HTTPLocalRateLimitPolicy::group(&()),
            kind: linkerd_k8s_api::HTTPLocalRateLimitPolicy::kind(&()),
            name: name.clone().into(),
        },
    };

    let ratelimit = linkerd_k8s_api::HTTPLocalRateLimitPolicy {
        metadata: k8s_core_api::ObjectMeta {
            name: Some(name),
            namespace: Some("ns".to_string()),
            ..Default::default()
        },
        spec: linkerd_k8s_api::RateLimitPolicySpec {
            target_ref: linkerd_k8s_api::LocalTargetRef {
                group: Some("policy.linkerd.io".to_string()),
                kind: "Server".to_string(),
                name: server,
            },
            total: Some(linkerd_k8s_api::Limit {
                requests_per_second: 1,
            }),
            identity: None,
            overrides: None,
        },
        status: None,
    };

    (ratelimit_id, ratelimit)
}