diff --git a/src/NRedisStack/Bloom/BloomCommands.cs b/src/NRedisStack/Bloom/BloomCommands.cs
index d0765adc..810f13a8 100644
--- a/src/NRedisStack/Bloom/BloomCommands.cs
+++ b/src/NRedisStack/Bloom/BloomCommands.cs
@@ -4,7 +4,7 @@
namespace NRedisStack
{
- public class BloomCommands
+ public class BloomCommands : IBloomCommands
{
IDatabase _db;
public BloomCommands(IDatabase db)
@@ -12,100 +12,49 @@ public BloomCommands(IDatabase db)
_db = db;
}
- ///
- /// Adds an item to a Bloom Filter.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public bool Add(RedisKey key, RedisValue item)
{
return _db.Execute(BF.ADD, key, item).ToString() == "1";
}
- ///
- /// Adds an item to a Bloom Filter.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public async Task AddAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(BF.ADD, key, item);
return result.ToString() == "1";
}
- ///
- /// Checks whether an item exist in the Bloom Filter or not.
- ///
- /// The name of the filter.
- /// The item to check for.
- /// means the item may exist in the filter,
- /// and means it does not exist in the filter.
- ///
+ ///
public bool Exists(RedisKey key, RedisValue item)
{
return _db.Execute(BF.EXISTS, key, item).ToString() == "1";
}
- ///
- /// Checks whether an item exist in the Bloom Filter or not.
- ///
- /// The name of the filter.
- /// The item to check for.
- /// means the item may exist in the filter,
- /// and means it does not exist in the filter.
- ///
+ ///
public async Task ExistsAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(BF.EXISTS, key, item);
return result.ToString() == "1";
}
- ///
- /// Return information about a bloom filter.
- ///
- /// Name of the key to return information about.
- /// Information of the filter.
- ///
+ ///
public BloomInformation Info(RedisKey key)
{
return _db.Execute(BF.INFO, key).ToBloomInfo();
}
- ///
- /// Return information about a bloom filter.
- ///
- /// Name of the key to return information about.
- /// Information of the filter.
- ///
+ ///
public async Task InfoAsync(RedisKey key)
{
var info = await _db.ExecuteAsync(BF.INFO, key);
return info.ToBloomInfo();
}
- ///
- /// Adds one or more items to a Bloom Filter. A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) Specifies the error ratio of the newly created filter if it does not yet exist.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// (Optional) to indicates that the
- /// filter should not be created if it does not already exist.
- /// (Optional) toprevent the filter
- /// from creating additional sub-filters if initial capacity is reached.
- /// An array of booleans. Each element is either true or false depending on whether the
- /// corresponding input element was newly added to the filter or may have previously existed.
- ///
+ ///
public bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null,
- double? error = null, int? expansion = null,
- bool nocreate = false, bool nonscaling = false)
+ double? error = null, int? expansion = null,
+ bool nocreate = false, bool nonscaling = false)
{
if (items.Length < 1)
throw new ArgumentOutOfRangeException(nameof(items));
@@ -115,25 +64,10 @@ public bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null,
return _db.Execute(BF.INSERT, args).ToBooleanArray();
}
- ///
- /// Adds one or more items to a Bloom Filter. A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) Specifies the error ratio of the newly created filter if it does not yet exist.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// (Optional) to indicates that the
- /// filter should not be created if it does not already exist.
- /// (Optional) toprevent the filter
- /// from creating additional sub-filters if initial capacity is reached.
- /// An array of booleans. Each element is either true or false depending on whether the
- /// corresponding input element was newly added to the filter or may have previously existed.
- ///
+ ///
public async Task InsertAsync(RedisKey key, RedisValue[] items, int? capacity = null,
- double? error = null, int? expansion = null,
- bool nocreate = false, bool nonscaling = false)
+ double? error = null, int? expansion = null,
+ bool nocreate = false, bool nonscaling = false)
{
if (items.Length < 1)
throw new ArgumentOutOfRangeException(nameof(items));
@@ -144,41 +78,20 @@ public async Task InsertAsync(RedisKey key, RedisValue[] items, int? cap
return result.ToBooleanArray();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the key to restore.
- /// Iterator value associated with data (returned by SCANDUMP).
- /// Current data chunk (returned by SCANDUMP).
- /// if executed correctly, error otherwise/>
- ///
+ ///
public bool LoadChunk(RedisKey key, long iterator, Byte[] data)
{
return _db.Execute(BF.LOADCHUNK, key, iterator, data).OKtoBoolean();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the key to restore.
- /// Iterator value associated with data (returned by SCANDUMP).
- /// Current data chunk (returned by SCANDUMP).
- /// if executed correctly, error otherwise/>
- ///
+ ///
public async Task LoadChunkAsync(RedisKey key, long iterator, Byte[] data)
{
var result = await _db.ExecuteAsync(BF.LOADCHUNK, key, iterator, data);
return result.OKtoBoolean();
}
- ///
- /// Adds one or more items to the Bloom Filter. A filter will be created if it does not exist yet.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// An array of booleans. Each element is either true or false depending on whether the
- /// corresponding input element was newly added to the filter or may have previously existed.
- ///
+ ///
public bool[] MAdd(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
@@ -194,14 +107,7 @@ public bool[] MAdd(RedisKey key, params RedisValue[] items)
return _db.Execute(BF.MADD, args).ToBooleanArray();
}
- ///
- /// Adds one or more items to the Bloom Filter. A filter will be created if it does not exist yet.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// An array of booleans. Each element is either true or false depending on whether the
- /// corresponding input element was newly added to the filter or may have previously existed.
- ///
+ ///
public async Task MAddAsync(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
@@ -218,14 +124,7 @@ public async Task MAddAsync(RedisKey key, params RedisValue[] items)
return result.ToBooleanArray();
}
- ///
- /// Checks whether one or more items may exist in the filter or not.
- ///
- /// The name of the filter.
- /// One or more items to check.
- /// An array of booleans, for each item means the item may exist in the filter,
- /// and means the item may exist in the filter.
- ///
+ ///
public bool[] MExists(RedisKey key, RedisValue[] items)
{
if (items.Length < 1)
@@ -242,14 +141,7 @@ public bool[] MExists(RedisKey key, RedisValue[] items)
}
- ///
- /// Checks whether one or more items may exist in the filter or not.
- ///
- /// The name of the filter.
- /// One or more items to check.
- /// An array of booleans, for each item means the item may exist in the filter,
- /// and means the item may exist in the filter.
- ///
+ ///
public async Task MExistsAsync(RedisKey key, RedisValue[] items)
{
if (items.Length < 1)
@@ -267,20 +159,9 @@ public async Task MExistsAsync(RedisKey key, RedisValue[] items)
}
- ///
- /// Creates a new Bloom Filter.
- ///
- /// The key under which the filter is found.
- /// The desired probability for false positives (value between 0 to 1).
- /// The number of entries intended to be added to the filter.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// (Optional) toprevent the filter
- /// from creating additional sub-filters if initial capacity is reached.
- /// if executed correctly, error otherwise/>
- ///
+ ///
public bool Reserve(RedisKey key, double errorRate, long capacity,
- int? expansion = null, bool nonscaling = false)
+ int? expansion = null, bool nonscaling = false)
{
List args = new List { key, errorRate, capacity };
@@ -297,20 +178,9 @@ public bool Reserve(RedisKey key, double errorRate, long capacity,
return _db.Execute(BF.RESERVE, args).OKtoBoolean();
}
- ///
- /// Creates a new Bloom Filter.
- ///
- /// The key under which the filter is found.
- /// The desired probability for false positives (value between 0 to 1).
- /// The number of entries intended to be added to the filter.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// (Optional) toprevent the filter
- /// from creating additional sub-filters if initial capacity is reached.
- /// if executed correctly, Error otherwise.
- ///
+ ///
public async Task ReserveAsync(RedisKey key, double errorRate, long capacity,
- int? expansion = null, bool nonscaling = false)
+ int? expansion = null, bool nonscaling = false)
{
List args = new List { key, errorRate, capacity };
@@ -328,25 +198,13 @@ public async Task ReserveAsync(RedisKey key, double errorRate, long capaci
return result.OKtoBoolean();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the filter.
- /// Iterator value; either 0 or the iterator from a previous invocation of this command.
- /// Tuple of iterator and data.
- ///
+ ///
public Tuple ScanDump(RedisKey key, long iterator)
{
return _db.Execute(BF.SCANDUMP, key, iterator).ToScanDumpTuple();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the filter.
- /// Iterator value; either 0 or the iterator from a previous invocation of this command.
- /// Tuple of iterator and data.
- ///
+ ///
public async Task> ScanDumpAsync(RedisKey key, long iterator)
{
var result = await _db.ExecuteAsync(BF.SCANDUMP, key, iterator);
diff --git a/src/NRedisStack/Bloom/IBloomCommands.cs b/src/NRedisStack/Bloom/IBloomCommands.cs
new file mode 100644
index 00000000..5995803a
--- /dev/null
+++ b/src/NRedisStack/Bloom/IBloomCommands.cs
@@ -0,0 +1,210 @@
+using NRedisStack.Bloom.DataTypes;
+using StackExchange.Redis;
+
+namespace NRedisStack
+{
+ public interface IBloomCommands
+ {
+ ///
+ /// Adds an item to a Bloom Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ bool Add(RedisKey key, RedisValue item);
+
+ ///
+ /// Adds an item to a Bloom Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ Task AddAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Checks whether an item exists in the Bloom Filter or not.
+ ///
+ /// The name of the filter.
+ /// The item to check for.
+ /// means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ bool Exists(RedisKey key, RedisValue item);
+
+ ///
+ /// Checks whether an item exists in the Bloom Filter or not.
+ ///
+ /// The name of the filter.
+ /// The item to check for.
+ /// means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ Task ExistsAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Return information about a bloom filter.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the filter.
+ ///
+ BloomInformation Info(RedisKey key);
+
+ ///
+ /// Return information about a bloom filter.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the filter.
+ ///
+ Task InfoAsync(RedisKey key);
+
+ ///
+ /// Adds one or more items to a Bloom Filter. A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) Specifies the error ratio of the newly created filter if it does not yet exist.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// (Optional) to indicate that the
+ /// filter should not be created if it does not already exist.
+ /// (Optional) to prevent the filter
+ /// from creating additional sub-filters if initial capacity is reached.
+ /// An array of booleans. Each element is either true or false depending on whether the
+ /// corresponding input element was newly added to the filter or may have previously existed.
+ ///
+ bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null,
+ double? error = null, int? expansion = null,
+ bool nocreate = false, bool nonscaling = false);
+
+ ///
+ /// Adds one or more items to a Bloom Filter. A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) Specifies the error ratio of the newly created filter if it does not yet exist.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// (Optional) to indicate that the
+ /// filter should not be created if it does not already exist.
+ /// (Optional) to prevent the filter
+ /// from creating additional sub-filters if initial capacity is reached.
+ /// An array of booleans. Each element is either true or false depending on whether the
+ /// corresponding input element was newly added to the filter or may have previously existed.
+ ///
+ Task InsertAsync(RedisKey key, RedisValue[] items, int? capacity = null,
+ double? error = null, int? expansion = null,
+ bool nocreate = false, bool nonscaling = false);
+
+ ///
+ /// Restores a filter previously saved using SCANDUMP.
+ ///
+ /// Name of the key to restore.
+ /// Iterator value associated with data (returned by SCANDUMP).
+ /// Current data chunk (returned by SCANDUMP).
+ /// if executed correctly, error otherwise.
+ ///
+ bool LoadChunk(RedisKey key, long iterator, Byte[] data);
+
+ ///
+ /// Restores a filter previously saved using SCANDUMP.
+ ///
+ /// Name of the key to restore.
+ /// Iterator value associated with data (returned by SCANDUMP).
+ /// Current data chunk (returned by SCANDUMP).
+ /// if executed correctly, error otherwise.
+ ///
+ Task LoadChunkAsync(RedisKey key, long iterator, Byte[] data);
+
+ ///
+ /// Adds one or more items to the Bloom Filter. A filter will be created if it does not exist yet.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// An array of booleans. Each element is either true or false depending on whether the
+ /// corresponding input element was newly added to the filter or may have previously existed.
+ ///
+ bool[] MAdd(RedisKey key, params RedisValue[] items);
+
+ ///
+ /// Adds one or more items to the Bloom Filter. A filter will be created if it does not exist yet.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// An array of booleans. Each element is either true or false depending on whether the
+ /// corresponding input element was newly added to the filter or may have previously existed.
+ ///
+ Task MAddAsync(RedisKey key, params RedisValue[] items);
+
+ ///
+ /// Checks whether one or more items may exist in the filter or not.
+ ///
+ /// The name of the filter.
+ /// One or more items to check.
+ /// An array of booleans, for each item means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ bool[] MExists(RedisKey key, RedisValue[] items);
+
+ ///
+ /// Checks whether one or more items may exist in the filter or not.
+ ///
+ /// The name of the filter.
+ /// One or more items to check.
+ /// An array of booleans, for each item means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ Task MExistsAsync(RedisKey key, RedisValue[] items);
+
+ ///
+ /// Creates a new Bloom Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The desired probability for false positives (value between 0 to 1).
+ /// The number of entries intended to be added to the filter.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// (Optional) to prevent the filter
+ /// from creating additional sub-filters if initial capacity is reached.
+ /// if executed correctly, error otherwise.
+ ///
+ bool Reserve(RedisKey key, double errorRate, long capacity,
+ int? expansion = null, bool nonscaling = false);
+
+ ///
+ /// Creates a new Bloom Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The desired probability for false positives (value between 0 to 1).
+ /// The number of entries intended to be added to the filter.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// (Optional) to prevent the filter
+ /// from creating additional sub-filters if initial capacity is reached.
+ /// if executed correctly, Error otherwise.
+ ///
+ Task ReserveAsync(RedisKey key, double errorRate, long capacity,
+ int? expansion = null, bool nonscaling = false);
+
+ ///
+ /// Begins an incremental save of the filter, for later restoring with LOADCHUNK.
+ ///
+ /// Name of the filter.
+ /// Iterator value; either 0 or the iterator from a previous invocation of this command.
+ /// Tuple of iterator and data.
+ ///
+ Tuple ScanDump(RedisKey key, long iterator);
+
+ ///
+ /// Begins an incremental save of the filter, for later restoring with LOADCHUNK.
+ ///
+ /// Name of the filter.
+ /// Iterator value; either 0 or the iterator from a previous invocation of this command.
+ /// Tuple of iterator and data.
+ ///
+ Task> ScanDumpAsync(RedisKey key, long iterator);
+ }
+}
diff --git a/src/NRedisStack/CountMinSketch/CmsCommands.cs b/src/NRedisStack/CountMinSketch/CmsCommands.cs
index 2e41c866..95bc7b3c 100644
--- a/src/NRedisStack/CountMinSketch/CmsCommands.cs
+++ b/src/NRedisStack/CountMinSketch/CmsCommands.cs
@@ -4,7 +4,7 @@
namespace NRedisStack
{
- public class CmsCommands
+ public class CmsCommands : ICmsCommands
{
IDatabase _db;
public CmsCommands(IDatabase db)
@@ -12,41 +12,20 @@ public CmsCommands(IDatabase db)
_db = db;
}
- ///
- /// Increases the count of item by increment.
- ///
- /// The name of the sketch.
- /// The item which counter is to be increased.
- /// Amount by which the item counter is to be increased.
- /// Count of each item after increment.
- ///
+ ///
public long IncrBy(RedisKey key, RedisValue item, long increment)
{
return _db.Execute(CMS.INCRBY, key, item, increment).ToLong();
}
- ///
- /// Increases the count of item by increment.
- ///
- /// The name of the sketch.
- /// The item which counter is to be increased.
- /// Amount by which the item counter is to be increased.
- /// Count of each item after increment.
- ///
+ ///
public async Task IncrByAsync(RedisKey key, RedisValue item, long increment)
{
var result = await _db.ExecuteAsync(CMS.INCRBY, key, item, increment);
return result.ToLong();
}
- ///
- /// Increases the count of item by increment.
- ///
- /// The name of the sketch.
- /// Tuple of The items which counter is to be increased
- /// and the Amount by which the item counter is to be increased.
- /// Count of each item after increment.
- ///
+ ///
public long[] IncrBy(RedisKey key, Tuple[] itemIncrements)
{
if (itemIncrements.Length < 1)
@@ -61,14 +40,7 @@ public long[] IncrBy(RedisKey key, Tuple[] itemIncrements)
return _db.Execute(CMS.INCRBY, args).ToLongArray();
}
- ///
- /// Increases the count of item by increment.
- ///
- /// The name of the sketch.
- /// Tuple of The items which counter is to be increased
- /// and the Amount by which the item counter is to be increased.
- /// Count of each item after increment.
- ///
+ ///
public async Task IncrByAsync(RedisKey key, Tuple[] itemIncrements)
{
if (itemIncrements.Length < 1)
@@ -85,95 +57,47 @@ public async Task IncrByAsync(RedisKey key, Tuple[] it
return result.ToLongArray();
}
- ///
- /// Return information about a sketch.
- ///
- /// Name of the key to return information about.
- /// Information of the sketch.
- ///
+ ///
public CmsInformation Info(RedisKey key)
{
var info = _db.Execute(CMS.INFO, key);
return info.ToCmsInfo();
}
- ///
- /// Return information about a sketch.
- ///
- /// Name of the key to return information about.
- /// Information of the sketch.
- ///
+ ///
public async Task InfoAsync(RedisKey key)
{
var info = await _db.ExecuteAsync(CMS.INFO, key);
return info.ToCmsInfo();
}
- ///
- /// Initializes a Count-Min Sketch to dimensions specified by user.
- ///
- /// TThe name of the sketch.
- /// Number of counters in each array. Reduces the error size.
- /// Number of counter-arrays. Reduces the probability for an error
- /// of a certain size (percentage of total count).
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public bool InitByDim(RedisKey key, long width, long depth)
{
return _db.Execute(CMS.INITBYDIM, key, width, depth).OKtoBoolean();
}
- ///
- /// Initializes a Count-Min Sketch to dimensions specified by user.
- ///
- /// TThe name of the sketch.
- /// Number of counters in each array. Reduces the error size.
- /// Number of counter-arrays. Reduces the probability for an error
- /// of a certain size (percentage of total count).
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public async Task InitByDimAsync(RedisKey key, long width, long depth)
{
var result = await _db.ExecuteAsync(CMS.INITBYDIM, key, width, depth);
return result.OKtoBoolean();
}
- ///
- /// Initializes a Count-Min Sketch to accommodate requested tolerances.
- ///
- /// The name of the sketch.
- /// Estimate size of error.
- /// The desired probability for inflated count.
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public bool InitByProb(RedisKey key, double error, double probability)
{
return _db.Execute(CMS.INITBYPROB, key, error, probability).OKtoBoolean();
}
- ///
- /// Initializes a Count-Min Sketch to accommodate requested tolerances.
- ///
- /// The name of the sketch.
- /// Estimate size of error.
- /// The desired probability for inflated count.
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public async Task InitByProbAsync(RedisKey key, double error, double probability)
{
var result = await _db.ExecuteAsync(CMS.INITBYPROB, key, error, probability);
return result.OKtoBoolean();
}
- ///
- /// Merges several sketches into one sketch.
- ///
- /// The name of destination sketch. Must be initialized
- /// Number of sketches to be merged.
- /// Names of source sketches to be merged.
- /// Multiple of each sketch. Default = 1.
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public bool Merge(RedisValue destination, long numKeys, RedisValue[] source, long[]? weight = null)
{
if (source.Length < 1)
@@ -192,15 +116,7 @@ public bool Merge(RedisValue destination, long numKeys, RedisValue[] source, lon
return _db.Execute(CMS.MERGE, args).OKtoBoolean();
}
- ///
- /// Merges several sketches into one sketch.
- ///
- /// The name of destination sketch. Must be initialized
- /// Number of sketches to be merged.
- /// Names of source sketches to be merged.
- /// Multiple of each sketch. Default = 1.
- /// if if executed correctly, Error otherwise.
- ///
+ ///
public async Task MergeAsync(RedisValue destination, long numKeys, RedisValue[] source, long[]? weight = null)
{
if (source.Length < 1)
@@ -220,13 +136,7 @@ public async Task MergeAsync(RedisValue destination, long numKeys, RedisVa
return result.OKtoBoolean();
}
- ///
- /// Returns the count for one or more items in a sketch.
- ///
- /// The name of the sketch
- /// One or more items for which to return the count.
- /// Array with a min-count of each of the items in the sketch
- ///
+ ///
public long[] Query(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
@@ -238,13 +148,7 @@ public long[] Query(RedisKey key, params RedisValue[] items)
return _db.Execute(CMS.QUERY, args).ToLongArray();
}
- ///
- /// Returns the count for one or more items in a sketch.
- ///
- /// The name of the sketch
- /// One or more items for which to return the count.
- /// Array with a min-count of each of the items in the sketch
- ///
+ ///
public async Task QueryAsync(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
diff --git a/src/NRedisStack/CountMinSketch/ICmsCommands.cs b/src/NRedisStack/CountMinSketch/ICmsCommands.cs
new file mode 100644
index 00000000..338cdd0d
--- /dev/null
+++ b/src/NRedisStack/CountMinSketch/ICmsCommands.cs
@@ -0,0 +1,145 @@
+using NRedisStack.CountMinSketch.DataTypes;
+using StackExchange.Redis;
+
+namespace NRedisStack
+{
+ public interface ICmsCommands
+ {
+ ///
+ /// Increases the count of item by increment.
+ ///
+ /// The name of the sketch.
+ /// The item which counter is to be increased.
+ /// Amount by which the item counter is to be increased.
+ /// Count of each item after increment.
+ ///
+ long IncrBy(RedisKey key, RedisValue item, long increment);
+
+ ///
+ /// Increases the count of item by increment.
+ ///
+ /// The name of the sketch.
+ /// The item which counter is to be increased.
+ /// Amount by which the item counter is to be increased.
+ /// Count of each item after increment.
+ ///
+ Task IncrByAsync(RedisKey key, RedisValue item, long increment);
+
+ ///
+ /// Increases the count of item by increment.
+ ///
+ /// The name of the sketch.
+ /// Tuple of The items which counter is to be increased
+ /// and the Amount by which the item counter is to be increased.
+ /// Count of each item after increment.
+ ///
+ long[] IncrBy(RedisKey key, Tuple[] itemIncrements);
+ ///
+ /// Increases the count of item by increment.
+ ///
+ /// The name of the sketch.
+ /// Tuple of The items which counter is to be increased
+ /// and the Amount by which the item counter is to be increased.
+ /// Count of each item after increment.
+ ///
+ Task IncrByAsync(RedisKey key, Tuple[] itemIncrements);
+
+ ///
+ /// Return information about a sketch.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the sketch.
+ ///
+ CmsInformation Info(RedisKey key);
+
+ ///
+ /// Return information about a sketch.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the sketch.
+ ///
+ Task InfoAsync(RedisKey key);
+
+ ///
+ /// Initializes a Count-Min Sketch to dimensions specified by user.
+ ///
+ /// The name of the sketch.
+ /// Number of counters in each array. Reduces the error size.
+ /// Number of counter-arrays. Reduces the probability for an error
+ /// of a certain size (percentage of total count).
+ /// if executed correctly, Error otherwise.
+ ///
+ bool InitByDim(RedisKey key, long width, long depth);
+
+ ///
+ /// Initializes a Count-Min Sketch to dimensions specified by user.
+ ///
+ /// The name of the sketch.
+ /// Number of counters in each array. Reduces the error size.
+ /// Number of counter-arrays. Reduces the probability for an error
+ /// of a certain size (percentage of total count).
+ /// if executed correctly, Error otherwise.
+ ///
+ Task InitByDimAsync(RedisKey key, long width, long depth);
+
+ ///
+ /// Initializes a Count-Min Sketch to accommodate requested tolerances.
+ ///
+ /// The name of the sketch.
+ /// Estimate size of error.
+ /// The desired probability for inflated count.
+ /// if executed correctly, Error otherwise.
+ ///
+ bool InitByProb(RedisKey key, double error, double probability);
+
+ ///
+ /// Initializes a Count-Min Sketch to accommodate requested tolerances.
+ ///
+ /// The name of the sketch.
+ /// Estimate size of error.
+ /// The desired probability for inflated count.
+ /// if executed correctly, Error otherwise.
+ ///
+ Task InitByProbAsync(RedisKey key, double error, double probability);
+
+ ///
+ /// Merges several sketches into one sketch.
+ ///
+ /// The name of destination sketch. Must be initialized.
+ /// Number of sketches to be merged.
+ /// Names of source sketches to be merged.
+ /// Multiple of each sketch. Default = 1.
+ /// if executed correctly, Error otherwise.
+ ///
+ bool Merge(RedisValue destination, long numKeys, RedisValue[] source, long[]? weight = null);
+
+ ///
+ /// Merges several sketches into one sketch.
+ ///
+ /// The name of destination sketch. Must be initialized.
+ /// Number of sketches to be merged.
+ /// Names of source sketches to be merged.
+ /// Multiple of each sketch. Default = 1.
+ /// if executed correctly, Error otherwise.
+ ///
+ Task MergeAsync(RedisValue destination, long numKeys, RedisValue[] source, long[]? weight = null);
+
+ ///
+ /// Returns the count for one or more items in a sketch.
+ ///
+ /// The name of the sketch
+ /// One or more items for which to return the count.
+ /// Array with a min-count of each of the items in the sketch
+ ///
+ long[] Query(RedisKey key, params RedisValue[] items);
+
+ ///
+ /// Returns the count for one or more items in a sketch.
+ ///
+ /// The name of the sketch
+ /// One or more items for which to return the count.
+ /// Array with a min-count of each of the items in the sketch
+ ///
+ Task QueryAsync(RedisKey key, params RedisValue[] items);
+ }
+}
\ No newline at end of file
diff --git a/src/NRedisStack/CuckooFilter/CuckooCommands.cs b/src/NRedisStack/CuckooFilter/CuckooCommands.cs
index 85a81272..5ed390fe 100644
--- a/src/NRedisStack/CuckooFilter/CuckooCommands.cs
+++ b/src/NRedisStack/CuckooFilter/CuckooCommands.cs
@@ -4,7 +4,7 @@
namespace NRedisStack
{
- public class CuckooCommands
+ public class CuckooCommands : ICuckooCommands
{
IDatabase _db;
public CuckooCommands(IDatabase db)
@@ -12,166 +12,86 @@ public CuckooCommands(IDatabase db)
_db = db;
}
- ///
- /// Adds an item to a Cuckoo Filter.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public bool Add(RedisKey key, RedisValue item)
{
return _db.Execute(CF.ADD, key, item).ToString() == "1";
}
- ///
- /// Adds an item to a Cuckoo Filter.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public async Task AddAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(CF.ADD, key, item);
return result.ToString() == "1";
}
- ///
- /// Adds an item to a Cuckoo Filter if the item did not exist previously.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public bool AddNX(RedisKey key, RedisValue item)
{
return _db.Execute(CF.ADDNX, key, item).ToString() == "1";
}
- ///
- /// Adds an item to a Cuckoo Filter if the item did not exist previously.
- ///
- /// The key under which the filter is found.
- /// The item to add.
- /// if the item did not exist in the filter, otherwise.
- ///
+ ///
public async Task AddNXAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(CF.ADDNX, key, item);
return result.ToString() == "1";
}
- ///
- /// Returns the number of times an item may be in the filter.
- ///
- /// The name of the filter
- /// The item to count.
- /// the count of possible matching copies of the item in the filter.
- ///
+ ///
public long Count(RedisKey key, RedisValue item)
{
return _db.Execute(CF.COUNT, key, item).ToLong();
}
- ///
- /// Returns the number of times an item may be in the filter.
- ///
- /// The name of the filter
- /// The item to count.
- /// the count of possible matching copies of the item in the filter.
- ///
+ ///
public async Task CountAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(CF.COUNT, key, item);
return result.ToLong();
}
- ///
- /// Deletes an item from the Cuckoo Filter.
- ///
- /// The name of the filter
- /// The item to delete from the filter.
- /// see langword="true"/> if the item has been deleted from the filter, otherwise.
- ///
+ ///
public bool Del(RedisKey key, RedisValue item)
{
return _db.Execute(CF.DEL, key, item).ToString() == "1";
}
- ///
- /// Deletes an item from the Cuckoo Filter.
- ///
- /// The name of the filter
- /// The item to delete from the filter.
- /// see langword="true"/> if the item has been deleted from the filter, otherwise.
- ///
+ ///
public async Task DelAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(CF.DEL, key, item);
return result.ToString() == "1";
}
- ///
- /// Checks whether an item exist in the Cuckoo Filter or not.
- ///
- /// The name of the filter.
- /// The item to check for.
- /// means the item may exist in the filter,
- /// and means it does not exist in the filter.
- ///
+ ///
public bool Exists(RedisKey key, RedisValue item)
{
return _db.Execute(CF.EXISTS, key, item).ToString() == "1";
}
- ///
- /// Checks whether an item exist in the Cuckoo Filter or not.
- ///
- /// The name of the filter.
- /// The item to check for.
- /// means the item may exist in the filter,
- /// and means it does not exist in the filter.
- ///
+ ///
public async Task ExistsAsync(RedisKey key, RedisValue item)
{
var result = await _db.ExecuteAsync(CF.EXISTS, key, item);
return result.ToString() == "1";
}
- ///
- /// Return information about a Cuckoo filter.
- ///
- /// Name of the key to return information about.
- /// Information of the filter.
- ///
+ ///
public CuckooInformation Info(RedisKey key)
{
var info = _db.Execute(CF.INFO, key);
return info.ToCuckooInfo();
}
- ///
- /// Return information about a Cuckoo filter.
- ///
- /// Name of the key to return information about.
- /// Information of the filter.
- ///
+ ///
public async Task InfoAsync(RedisKey key)
{
var info = await _db.ExecuteAsync(CF.INFO, key);
return info.ToCuckooInfo();
}
- ///
- /// Adds one or more items to a Cuckoo Filter. A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) to indicates that the
- /// An array of booleans.
- ///
+ ///
public bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false)
{
if (items.Length < 1)
@@ -199,15 +119,7 @@ public bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null, boo
return _db.Execute(CF.INSERT, args).ToBooleanArray();
}
- ///
- /// Adds one or more items to a Cuckoo Filter. A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) to indicates that the
- /// An array of booleans.
- ///
+ ///
public async Task InsertAsync(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false)
{
if (items.Length < 1)
@@ -236,17 +148,7 @@ public async Task InsertAsync(RedisKey key, RedisValue[] items, int? cap
return result.ToBooleanArray();
}
- ///
- /// Adds one or more items to a Cuckoo Filter if the items did not exist previously.
- /// A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) to indicates that the
- /// An array of booleans.where means the item has been added to the filter,
- /// and mean, the item already existed
- ///
+ ///
public bool[] InsertNX(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false)
{
if (items.Length < 1)
@@ -274,17 +176,7 @@ public bool[] InsertNX(RedisKey key, RedisValue[] items, int? capacity = null, b
return _db.Execute(CF.INSERTNX, args).ToBooleanArray();
}
- ///
- /// Adds one or more items to a Cuckoo Filter if the items did not exist previously.
- /// A filter will be created if it does not exist.
- ///
- /// The name of the filter.
- /// One or more items to add.
- /// (Optional) Specifies the desired capacity for the filter to be created.
- /// (Optional) to indicates that the
- /// An array of booleans.where means the item has been added to the filter,
- /// and mean, the item already existed
- ///
+ ///
public async Task InsertNXAsync(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false)
{
if (items.Length < 1)
@@ -313,41 +205,20 @@ public async Task InsertNXAsync(RedisKey key, RedisValue[] items, int? c
return result.ToBooleanArray();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the key to restore.
- /// Iterator value associated with data (returned by SCANDUMP).
- /// Current data chunk (returned by SCANDUMP).
- /// Array with information of the filter.
- ///
+ ///
public bool LoadChunk(RedisKey key, long iterator, Byte[] data)
{
return _db.Execute(CF.LOADCHUNK, key, iterator, data).OKtoBoolean();
}
- ///
- /// Restores a filter previosly saved using SCANDUMP.
- ///
- /// Name of the key to restore.
- /// Iterator value associated with data (returned by SCANDUMP).
- /// Current data chunk (returned by SCANDUMP).
- /// Array with information of the filter.
- ///
+ ///
public async Task LoadChunkAsync(RedisKey key, long iterator, Byte[] data)
{
var result = await _db.ExecuteAsync(CF.LOADCHUNK, key, iterator, data);
return result.OKtoBoolean();
}
- ///
- /// Checks whether one or more items may exist in the a Cuckoo Filter.
- ///
- /// The name of the filter.
- /// One or more items to check.
- /// An array of booleans, for each item means the item may exist in the filter,
- /// and means the item may exist in the filter.
- ///
+ ///
public bool[] MExists(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
@@ -363,14 +234,7 @@ public bool[] MExists(RedisKey key, params RedisValue[] items)
return _db.Execute(CF.MEXISTS, args).ToBooleanArray();
}
- ///
- /// Checks whether one or more items may exist in the a Cuckoo Filter.
- ///
- /// The name of the filter.
- /// One or more items to check.
- /// An array of booleans, for each item means the item may exist in the filter,
- /// and means the item may exist in the filter.
- ///
+ ///
public async Task MExistsAsync(RedisKey key, params RedisValue[] items)
{
if (items.Length < 1)
@@ -387,20 +251,9 @@ public async Task MExistsAsync(RedisKey key, params RedisValue[] items)
return result.ToBooleanArray();
}
- ///
- /// Creates a new Cuckoo Filter.
- ///
- /// The key under which the filter is found.
- /// The number of entries intended to be added to the filter.
- /// Number of items in each bucket.
- /// Number of attempts to swap items between buckets before
- /// declaring filter as full and creating an additional filter.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// if executed correctly, Error otherwise.
- ///
+ ///
public bool Reserve(RedisKey key, long capacity,
- long? bucketSize = null, int? maxIterations = null, int? expansion = null)
+ long? bucketSize = null, int? maxIterations = null, int? expansion = null)
{
List args = new List { key, capacity };
@@ -425,20 +278,9 @@ public bool Reserve(RedisKey key, long capacity,
return _db.Execute(CF.RESERVE, args).OKtoBoolean();
}
- ///
- /// Creates a new Cuckoo Filter.
- ///
- /// The key under which the filter is found.
- /// The number of entries intended to be added to the filter.
- /// Number of items in each bucket.
- /// Number of attempts to swap items between buckets before
- /// declaring filter as full and creating an additional filter.
- /// (Optional) When capacity is reached, an additional sub-filter is
- /// created in size of the last sub-filter multiplied by expansion.
- /// if executed correctly, Error otherwise.
- ///
+ ///
public async Task ReserveAsync(RedisKey key, long capacity,
- long? bucketSize = null, int? maxIterations = null, int? expansion = null)
+ long? bucketSize = null, int? maxIterations = null, int? expansion = null)
{
List args = new List { key, capacity };
@@ -464,26 +306,14 @@ public async Task ReserveAsync(RedisKey key, long capacity,
return result.OKtoBoolean();
}
- ///
- /// Begins an incremental save of the Cuckoo Filter.
- ///
- /// Name of the filter.
- /// Iterator value; either 0 or the iterator from a previous invocation of this command.
- /// Tuple of iterator and data.
- ///
- public Tuple ScanDump(RedisKey key, long iterator)
+ ///
+ public Tuple ScanDump(RedisKey key, long iterator)
{
return _db.Execute(CF.SCANDUMP, key, iterator).ToScanDumpTuple();
}
- ///
- /// Begins an incremental save of the Cuckoo Filter.
- ///
- /// Name of the filter.
- /// Iterator value; either 0 or the iterator from a previous invocation of this command.
- /// Tuple of iterator and data.
- ///
- public async Task> ScanDumpAsync(RedisKey key, long iterator)
+ ///
+ public async Task> ScanDumpAsync(RedisKey key, long iterator)
{
var result = await _db.ExecuteAsync(CF.SCANDUMP, key, iterator);
return result.ToScanDumpTuple();
diff --git a/src/NRedisStack/CuckooFilter/ICuckooCommands.cs b/src/NRedisStack/CuckooFilter/ICuckooCommands.cs
new file mode 100644
index 00000000..d0e38f92
--- /dev/null
+++ b/src/NRedisStack/CuckooFilter/ICuckooCommands.cs
@@ -0,0 +1,251 @@
+using NRedisStack.CuckooFilter.DataTypes;
+using StackExchange.Redis;
+namespace NRedisStack
+{
+ public interface ICuckooCommands
+ {
+ ///
+ /// Adds an item to a Cuckoo Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ bool Add(RedisKey key, RedisValue item);
+
+ ///
+ /// Adds an item to a Cuckoo Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ Task AddAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Adds an item to a Cuckoo Filter if the item did not exist previously.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ bool AddNX(RedisKey key, RedisValue item);
+
+ ///
+ /// Adds an item to a Cuckoo Filter if the item did not exist previously.
+ ///
+ /// The key under which the filter is found.
+ /// The item to add.
+ /// if the item did not exist in the filter, otherwise.
+ ///
+ Task AddNXAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Returns the number of times an item may be in the filter.
+ ///
+ /// The name of the filter
+ /// The item to count.
+ /// the count of possible matching copies of the item in the filter.
+ ///
+ long Count(RedisKey key, RedisValue item);
+
+ ///
+ /// Returns the number of times an item may be in the filter.
+ ///
+ /// The name of the filter
+ /// The item to count.
+ /// the count of possible matching copies of the item in the filter.
+ ///
+ Task CountAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Deletes an item from the Cuckoo Filter.
+ ///
+ /// The name of the filter
+ /// The item to delete from the filter.
+ /// if the item has been deleted from the filter, otherwise.
+ ///
+ bool Del(RedisKey key, RedisValue item);
+
+ ///
+ /// Deletes an item from the Cuckoo Filter.
+ ///
+ /// The name of the filter
+ /// The item to delete from the filter.
+ /// if the item has been deleted from the filter, otherwise.
+ ///
+ Task DelAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Checks whether an item exists in the Cuckoo Filter or not.
+ ///
+ /// The name of the filter.
+ /// The item to check for.
+ /// means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ bool Exists(RedisKey key, RedisValue item);
+
+ ///
+ /// Checks whether an item exists in the Cuckoo Filter or not.
+ ///
+ /// The name of the filter.
+ /// The item to check for.
+ /// means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ Task ExistsAsync(RedisKey key, RedisValue item);
+
+ ///
+ /// Return information about a Cuckoo filter.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the filter.
+ ///
+ CuckooInformation Info(RedisKey key);
+
+ ///
+ /// Return information about a Cuckoo filter.
+ ///
+ /// Name of the key to return information about.
+ /// Information of the filter.
+ ///
+ Task InfoAsync(RedisKey key);
+
+ ///
+ /// Adds one or more items to a Cuckoo Filter. A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) to indicate that the filter should not be created if it does not already exist.
+ /// An array of booleans.
+ ///
+ bool[] Insert(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false);
+
+ ///
+ /// Adds one or more items to a Cuckoo Filter. A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) to indicate that the filter should not be created if it does not already exist.
+ /// An array of booleans.
+ ///
+ Task InsertAsync(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false);
+
+ ///
+ /// Adds one or more items to a Cuckoo Filter if the items did not exist previously.
+ /// A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) to indicate that the filter should not be created if it does not already exist.
+ /// An array of booleans, where means the item has been added to the filter,
+ /// and means the item already existed.
+ ///
+ bool[] InsertNX(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false);
+
+ ///
+ /// Adds one or more items to a Cuckoo Filter if the items did not exist previously.
+ /// A filter will be created if it does not exist.
+ ///
+ /// The name of the filter.
+ /// One or more items to add.
+ /// (Optional) Specifies the desired capacity for the filter to be created.
+ /// (Optional) to indicate that the filter should not be created if it does not already exist.
+ /// An array of booleans, where means the item has been added to the filter,
+ /// and means the item already existed.
+ ///
+ Task InsertNXAsync(RedisKey key, RedisValue[] items, int? capacity = null, bool nocreate = false);
+
+ ///
+ /// Restores a filter previously saved using SCANDUMP.
+ ///
+ /// Name of the key to restore.
+ /// Iterator value associated with data (returned by SCANDUMP).
+ /// Current data chunk (returned by SCANDUMP).
+ /// Array with information of the filter.
+ ///
+ bool LoadChunk(RedisKey key, long iterator, Byte[] data);
+
+ ///
+ /// Restores a filter previously saved using SCANDUMP.
+ ///
+ /// Name of the key to restore.
+ /// Iterator value associated with data (returned by SCANDUMP).
+ /// Current data chunk (returned by SCANDUMP).
+ /// Array with information of the filter.
+ ///
+ Task LoadChunkAsync(RedisKey key, long iterator, Byte[] data);
+
+ ///
+ /// Checks whether one or more items may exist in a Cuckoo Filter.
+ ///
+ /// The name of the filter.
+ /// One or more items to check.
+ /// An array of booleans, for each item means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ bool[] MExists(RedisKey key, params RedisValue[] items);
+
+ ///
+ /// Checks whether one or more items may exist in a Cuckoo Filter.
+ ///
+ /// The name of the filter.
+ /// One or more items to check.
+ /// An array of booleans, for each item means the item may exist in the filter,
+ /// and means it does not exist in the filter.
+ ///
+ Task MExistsAsync(RedisKey key, params RedisValue[] items);
+
+ ///
+ /// Creates a new Cuckoo Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The number of entries intended to be added to the filter.
+ /// Number of items in each bucket.
+ /// Number of attempts to swap items between buckets before
+ /// declaring filter as full and creating an additional filter.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// if executed correctly, Error otherwise.
+ ///
+ bool Reserve(RedisKey key, long capacity,
+ long? bucketSize = null, int? maxIterations = null, int? expansion = null);
+
+ ///
+ /// Creates a new Cuckoo Filter.
+ ///
+ /// The key under which the filter is found.
+ /// The number of entries intended to be added to the filter.
+ /// Number of items in each bucket.
+ /// Number of attempts to swap items between buckets before
+ /// declaring filter as full and creating an additional filter.
+ /// (Optional) When capacity is reached, an additional sub-filter is
+ /// created in size of the last sub-filter multiplied by expansion.
+ /// if executed correctly, Error otherwise.
+ ///
+ Task ReserveAsync(RedisKey key, long capacity,
+ long? bucketSize = null, int? maxIterations = null, int? expansion = null);
+
+ ///
+ /// Begins an incremental save of the Cuckoo Filter.
+ ///
+ /// Name of the filter.
+ /// Iterator value; either 0 or the iterator from a previous invocation of this command.
+ /// Tuple of iterator and data.
+ ///
+ Tuple ScanDump(RedisKey key, long iterator);
+
+ ///
+ /// Begins an incremental save of the Cuckoo Filter.
+ ///
+ /// Name of the filter.
+ /// Iterator value; either 0 or the iterator from a previous invocation of this command.
+ /// Tuple of iterator and data.
+ ///
+ Task> ScanDumpAsync(RedisKey key, long iterator);
+ }
+}
\ No newline at end of file
diff --git a/src/NRedisStack/ModulPrefixes.cs b/src/NRedisStack/ModulPrefixes.cs
index fd748a00..09e506bf 100644
--- a/src/NRedisStack/ModulPrefixes.cs
+++ b/src/NRedisStack/ModulPrefixes.cs
@@ -4,20 +4,20 @@ namespace NRedisStack.RedisStackCommands
{
public static class ModulPrefixes
{
- static public BloomCommands BF(this IDatabase db) => new BloomCommands(db);
+ static public IBloomCommands BF(this IDatabase db) => new BloomCommands(db);
- static public CuckooCommands CF(this IDatabase db) => new CuckooCommands(db);
+ static public ICuckooCommands CF(this IDatabase db) => new CuckooCommands(db);
- static public CmsCommands CMS(this IDatabase db) => new CmsCommands(db);
+ static public ICmsCommands CMS(this IDatabase db) => new CmsCommands(db);
- static public TopKCommands TOPK(this IDatabase db) => new TopKCommands(db);
+ static public ITopKCommands TOPK(this IDatabase db) => new TopKCommands(db);
- static public TdigestCommands TDIGEST(this IDatabase db) => new TdigestCommands(db);
+ static public ITdigestCommands TDIGEST(this IDatabase db) => new TdigestCommands(db);
- static public SearchCommands FT(this IDatabase db) => new SearchCommands(db);
+ static public ISearchCommands FT(this IDatabase db) => new SearchCommands(db);
- static public JsonCommands JSON(this IDatabase db) => new JsonCommands(db);
+ static public IJsonCommands JSON(this IDatabase db) => new JsonCommands(db);
- static public TimeSeriesCommands TS(this IDatabase db) => new TimeSeriesCommands(db);
+ static public ITimeSeriesCommands TS(this IDatabase db) => new TimeSeriesCommands(db);
}
}
\ No newline at end of file
diff --git a/src/NRedisStack/Search/ISearchCommands.cs b/src/NRedisStack/Search/ISearchCommands.cs
new file mode 100644
index 00000000..5752f7a1
--- /dev/null
+++ b/src/NRedisStack/Search/ISearchCommands.cs
@@ -0,0 +1,419 @@
+using NRedisStack.Search;
+using NRedisStack.Search.Aggregation;
+using NRedisStack.Search.DataTypes;
+using NRedisStack.Search.FT.CREATE;
+using StackExchange.Redis;
+
+namespace NRedisStack
+{
+ public interface ISearchCommands
+ {
+
+ ///
+ /// Returns a list of all existing indexes.
+ ///
+ /// Array with index names.
+ ///
+ RedisResult[] _List();
+
+ ///
+ /// Returns a list of all existing indexes.
+ ///
+ /// Array with index names.
+ ///
+ Task _ListAsync();
+
+ ///
+ /// Run a search query on an index, and perform aggregate transformations on the results.
+ ///
+ /// The index name.
+ /// The query
+ /// An object
+ ///
+ AggregationResult Aggregate(string index, AggregationRequest query);
+
+ ///
+ /// Run a search query on an index, and perform aggregate transformations on the results.
+ ///
+ /// The index name.
+ /// The query
+ /// An object
+ ///
+ Task AggregateAsync(string index, AggregationRequest query);
+
+ ///
+ /// Add an alias to an index.
+ ///
+ /// Alias to be added to an index.
+ /// The index name.
+ /// if executed correctly, error otherwise
+ ///
+ bool AliasAdd(string alias, string index);
+
+ ///
+ /// Add an alias to an index.
+ ///
+ /// Alias to be added to an index.
+ /// The index name.
+ /// if executed correctly, error otherwise
+ ///
+ Task AliasAddAsync(string alias, string index);
+
+ ///
+ /// Remove an alias to an index.
+ ///
+ /// Alias to be removed.
+ /// if executed correctly, error otherwise
+ ///
+ bool AliasDel(string alias);
+
+ ///
+ /// Remove an alias to an index.
+ ///
+ /// Alias to be removed.
+ /// if executed correctly, error otherwise
+ ///
+ Task AliasDelAsync(string alias);
+
+ ///
+ /// Add an alias to an index. If the alias is already associated with another index,
+ /// FT.ALIASUPDATE removes the alias association with the previous index.
+ ///
+ /// Alias to be added to an index.
+ /// The index name.
+ /// if executed correctly, error otherwise
+ ///
+ bool AliasUpdate(string alias, string index);
+
+ ///
+ /// Add an alias to an index. If the alias is already associated with another index,
+ /// FT.ALIASUPDATE removes the alias association with the previous index.
+ ///
+ /// Alias to be added to an index.
+ /// The index name.
+ /// if executed correctly, error otherwise
+ ///
+ Task AliasUpdateAsync(string alias, string index);
+
+ ///
+ /// Add a new attribute to the index
+ ///
+ /// The index name.
+ /// If set, does not scan and index.
+ /// the schema.
+ /// if executed correctly, error otherwise
+ ///
+ bool Alter(string index, Schema schema, bool skipInitialScan = false);
+
+ ///
+ /// Add a new attribute to the index
+ ///
+ /// The index name.
+ /// If set, does not scan and index.
+ /// the schema.
+ /// if executed correctly, error otherwise
+ ///
+ Task AlterAsync(string index, Schema schema, bool skipInitialScan = false);
+
+ ///
+ /// Retrieve configuration options.
+ ///
+ /// is name of the configuration option, or '*' for all.
+ /// An array reply of the configuration name and value.
+ ///
+ Dictionary ConfigGet(string option);
+
+ ///
+ /// Retrieve configuration options.
+ ///
+ /// is name of the configuration option, or '*' for all.
+ /// An array reply of the configuration name and value.
+ ///
+ Task> ConfigGetAsync(string option);
+ ///
+ /// Describe configuration options.
+ ///
+ /// is name of the configuration option, or '*' for all.
+ /// is value of the configuration option.
+ /// if executed correctly, error otherwise.
+ ///
+ bool ConfigSet(string option, string value);
+
+ ///
+ /// Describe configuration options.
+ ///
+ /// is name of the configuration option, or '*' for all.
+ /// is value of the configuration option.
+ /// if executed correctly, error otherwise.
+ ///
+ Task ConfigSetAsync(string option, string value);
+
+ ///
+ /// Create an index with the given specification.
+ ///
+ /// The index name.
+ /// Command's parameters.
+ /// The index schema.
+ /// if executed correctly, error otherwise
+ ///
+ bool Create(string indexName, FTCreateParams parameters, Schema schema);
+
+ ///
+ /// Create an index with the given specification.
+ ///
+ /// The index name.
+ /// Command's parameters.
+ /// The index schema.
+ /// if executed correctly, error otherwise
+ ///
+ Task CreateAsync(string indexName, FTCreateParams parameters, Schema schema);
+
+ ///
+ /// Delete a cursor from the index.
+ ///
+ /// The index name
+ /// The cursor's ID.
+ /// if it has been deleted, if it did not exist.
+ ///
+ bool CursorDel(string indexName, long cursorId);
+
+ ///
+ /// Delete a cursor from the index.
+ ///
+ /// The index name
+ /// The cursor's ID.
+ /// if it has been deleted, if it did not exist.
+ ///
+ Task CursorDelAsync(string indexName, long cursorId);
+
+ ///
+ /// Read next results from an existing cursor.
+ ///
+ /// The index name
+ /// The cursor's ID.
+ /// Limit the amount of returned results.
+ /// A AggregationResult object with the results
+ ///
+ AggregationResult CursorRead(string indexName, long cursorId, int? count = null);
+
+ ///
+ /// Read next results from an existing cursor.
+ ///
+ /// The index name
+ /// The cursor's ID.
+ /// Limit the amount of returned results.
+ /// A AggregationResult object with the results
+ ///
+ Task CursorReadAsync(string indexName, long cursorId, int? count = null);
+
+ ///
+ /// Add terms to a dictionary.
+ ///
+ /// The dictionary name
+ /// Terms to add to the dictionary.
+ /// The number of new terms that were added.
+ ///
+ long DictAdd(string dict, params string[] terms);
+
+ ///
+ /// Add terms to a dictionary.
+ ///
+ /// The dictionary name
+ /// Terms to add to the dictionary.
+ /// The number of new terms that were added.
+ ///
+ Task DictAddAsync(string dict, params string[] terms);
+
+ ///
+ /// Delete terms from a dictionary.
+ ///
+ /// The dictionary name
+ /// Terms to delete from the dictionary.
+ /// The number of new terms that were deleted.
+ ///
+ long DictDel(string dict, params string[] terms);
+
+ ///
+ /// Delete terms from a dictionary.
+ ///
+ /// The dictionary name
+ /// Terms to delete from the dictionary.
+ /// The number of new terms that were deleted.
+ ///
+ Task DictDelAsync(string dict, params string[] terms);
+
+ ///
+ /// Dump all terms in the given dictionary.
+ ///
+ /// The dictionary name
+ /// An array, where each element is term.
+ ///
+ RedisResult[] DictDump(string dict);
+
+ ///
+ /// Dump all terms in the given dictionary.
+ ///
+ /// The dictionary name
+ /// An array, where each element is term.
+ ///
+ Task DictDumpAsync(string dict);
+
+ ///
+ /// Delete an index.
+ ///
+ /// The index name
+ /// If set, deletes the actual document hashes.
+ /// if executed correctly, error otherwise
+ ///
+ bool DropIndex(string indexName, bool dd = false);
+
+ ///
+ /// Delete an index.
+ ///
+ /// The index name
+ /// If set, deletes the actual document hashes.
+ /// if executed correctly, error otherwise
+ ///
+ Task DropIndexAsync(string indexName, bool dd = false);
+
+ ///
+ /// Return the execution plan for a complex query
+ ///
+ /// The index name
+ /// The query to explain
+ /// String that representing the execution plan
+ ///
+ string Explain(string indexName, Query q);
+
+ ///
+ /// Return the execution plan for a complex query
+ ///
+ /// The index name
+ /// The query to explain
+ /// String that representing the execution plan
+ ///
+ Task ExplainAsync(string indexName, Query q);
+
+ ///
+ /// Return the execution plan for a complex query
+ ///
+ /// The index name
+ /// The query to explain
+ /// An array reply with a string representing the execution plan
+ ///
+ RedisResult[] ExplainCli(string indexName, Query q);
+
+ ///
+ /// Return the execution plan for a complex query
+ ///
+ /// The index name
+ /// The query to explain
+ /// An array reply with a string representing the execution plan
+ ///
+ Task ExplainCliAsync(string indexName, Query q);
+
+ // ///
+ // /// Return information and statistics on the index.
+ // ///
+ // /// The name of the index.
+ // /// Dictionary of key and value with information about the index
+ // ///
+ // Dictionary Info(RedisValue index);
+
+ ///
+ /// Return information and statistics on the index.
+ ///
+ /// The name of the index.
+ /// Dictionary of key and value with information about the index
+ ///
+ InfoResult Info(RedisValue index);
+
+ ///
+ /// Return information and statistics on the index.
+ ///
+ /// The name of the index.
+ /// Dictionary of key and value with information about the index
+ ///
+ Task InfoAsync(RedisValue index);
+
+ // TODO: FT.PROFILE (jedis doesn't have it)
+
+ ///
+ /// Search the index
+ ///
+ /// The index name
+ /// a object with the query string and optional parameters
+ /// a object with the results
+ ///
+ SearchResult Search(string indexName, Query q);
+
+ ///
+ /// Search the index
+ ///
+ /// The index name
+ /// a object with the query string and optional parameters
+ /// a object with the results
+ ///
+ Task SearchAsync(string indexName, Query q);
+
+ ///
+ /// Dump the contents of a synonym group.
+ ///
+ /// The index name
+ /// Pairs of term and an array of synonym groups.
+ ///
+ Dictionary> SynDump(string indexName);
+
+ // TODO: FT.SPELLCHECK (jedis doesn't have it)
+
+ ///
+ /// Dump the contents of a synonym group.
+ ///
+ /// The index name
+ /// Pairs of term and an array of synonym groups.
+ ///
+ Task>> SynDumpAsync(string indexName);
+
+ ///
+ /// Update a synonym group.
+ ///
+ /// The index name
+ /// The id of the synonym group to update.
+ /// If set, does not scan and index, and only documents
+ /// that are indexed after the update are affected.
+ /// The terms to add to the synonym group.
+ /// if executed correctly, error otherwise.
+ ///
+ bool SynUpdate(string indexName, string synonymGroupId, bool skipInitialScan = false, params string[] terms);
+
+ ///
+ /// Update a synonym group.
+ ///
+ /// The index name
+ /// The id of the synonym group to update.
+ /// If set, does not scan and index, and only documents
+ /// that are indexed after the update are affected.
+ /// The terms to add to the synonym group.
+ /// if executed correctly, error otherwise.
+ ///
+ Task SynUpdateAsync(string indexName, string synonymGroupId, bool skipInitialScan = false, params string[] terms);
+
+ ///
+ /// Return a distinct set of values indexed in a Tag field.
+ ///
+ /// The index name
+ /// TAG field name
+ /// List of TAG field values
+ ///
+ RedisResult[] TagVals(string indexName, string fieldName);
+
+ ///
+ /// Return a distinct set of values indexed in a Tag field.
+ ///
+ /// The index name
+ /// TAG field name
+ /// List of TAG field values
+ ///
+ Task TagValsAsync(string indexName, string fieldName);
+ }
+}
\ No newline at end of file
diff --git a/src/NRedisStack/Search/SearchCommands.cs b/src/NRedisStack/Search/SearchCommands.cs
index ade99ff0..5aa8b4d8 100644
--- a/src/NRedisStack/Search/SearchCommands.cs
+++ b/src/NRedisStack/Search/SearchCommands.cs
@@ -6,7 +6,7 @@
using StackExchange.Redis;
namespace NRedisStack
{
- public class SearchCommands
+ public class SearchCommands : ISearchCommands
{
IDatabase _db;
public SearchCommands(IDatabase db)
@@ -14,33 +14,19 @@ public SearchCommands(IDatabase db)
_db = db;
}
- ///
- /// Returns a list of all existing indexes.
- ///
- /// Array with index names.
- ///
+ ///
public RedisResult[] _List()
{
return _db.Execute(FT._LIST).ToArray();
}
- ///
- /// Returns a list of all existing indexes.
- ///
- /// Array with index names.
- ///
+ ///
public async Task _ListAsync()
{
return (await _db.ExecuteAsync(FT._LIST)).ToArray();
}
- ///
- /// Run a search query on an index, and perform aggregate transformations on the results.
- ///
- /// The index name.
- /// The query
- /// An object
- ///
+ ///
public AggregationResult Aggregate(string index, AggregationRequest query)
{
List args = new List { index };
@@ -62,13 +48,7 @@ public AggregationResult Aggregate(string index, AggregationRequest query)
}
}
- ///
- /// Run a search query on an index, and perform aggregate transformations on the results.
- ///
- /// The index name.
- /// The query
- /// An object
- ///
+ ///
public async Task AggregateAsync(string index, AggregationRequest query)
{
List args = new List { index };
@@ -90,86 +70,43 @@ public async Task AggregateAsync(string index, AggregationReq
}
}
- ///
- /// Add an alias to an index.
- ///
- /// Alias to be added to an index.
- /// The index name.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool AliasAdd(string alias, string index)
{
return _db.Execute(FT.ALIASADD, alias, index).OKtoBoolean();
}
- ///
- /// Add an alias to an index.
- ///
- /// Alias to be added to an index.
- /// The index name.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task AliasAddAsync(string alias, string index)
{
return (await _db.ExecuteAsync(FT.ALIASADD, alias, index)).OKtoBoolean();
}
- ///
- /// Remove an alias to an index.
- ///
- /// Alias to be removed.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool AliasDel(string alias)
{
return _db.Execute(FT.ALIASDEL, alias).OKtoBoolean();
}
- ///
- /// Remove an alias to an index.
- ///
- /// Alias to be removed.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task AliasDelAsync(string alias)
{
return (await _db.ExecuteAsync(FT.ALIASDEL, alias)).OKtoBoolean();
}
- ///
- /// Add an alias to an index. If the alias is already associated with another index,
- /// FT.ALIASUPDATE removes the alias association with the previous index.
- ///
- /// Alias to be removed.
- /// The index name.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool AliasUpdate(string alias, string index)
{
return _db.Execute(FT.ALIASUPDATE, alias, index).OKtoBoolean();
}
- ///
- /// Add an alias to an index. If the alias is already associated with another index,
- /// FT.ALIASUPDATE removes the alias association with the previous index.
- ///
- /// Alias to be removed.
- /// The index name.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task AliasUpdateAsync(string alias, string index)
{
return (await _db.ExecuteAsync(FT.ALIASUPDATE, alias, index)).OKtoBoolean();
}
- ///
- /// Add a new attribute to the index
- ///
- /// The index name.
- /// If set, does not scan and index.
- /// the schema.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool Alter(string index, Schema schema, bool skipInitialScan = false)
{
List args = new List() { index };
@@ -183,14 +120,7 @@ public bool Alter(string index, Schema schema, bool skipInitialScan = false)
return _db.Execute(FT.ALTER, args).OKtoBoolean();
}
- ///
- /// Add a new attribute to the index
- ///
- /// The index name.
- /// If set, does not scan and index.
- /// the schema.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task AlterAsync(string index, Schema schema, bool skipInitialScan = false)
{
List args = new List() { index };
@@ -204,61 +134,32 @@ public async Task AlterAsync(string index, Schema schema, bool skipInitial
return (await _db.ExecuteAsync(FT.ALTER, args)).OKtoBoolean();
}
- ///
- /// Retrieve configuration options.
- ///
- /// is name of the configuration option, or '*' for all.
- /// An array reply of the configuration name and value.
- ///
+ ///
public Dictionary ConfigGet(string option)
{
var result = _db.Execute(FT.CONFIG, "GET", option);
return result.ToConfigDictionary();
}
- ///
- /// Retrieve configuration options.
- ///
- /// is name of the configuration option, or '*' for all.
- /// An array reply of the configuration name and value.
- ///
+ ///
public async Task> ConfigGetAsync(string option)
{
return (await _db.ExecuteAsync(FT.CONFIG, "GET", option)).ToConfigDictionary();
}
- ///
- /// Describe configuration options.
- ///
- /// is name of the configuration option, or '*' for all.
- /// is value of the configuration option.
- /// if executed correctly, error otherwise.
- ///
+ ///
public bool ConfigSet(string option, string value)
{
return _db.Execute(FT.CONFIG, "SET", option, value).OKtoBoolean();
}
- ///
- /// Describe configuration options.
- ///
- /// is name of the configuration option, or '*' for all.
- /// is value of the configuration option.
- /// if executed correctly, error otherwise.
- ///
+ ///
public async Task ConfigSetAsync(string option, string value)
{
return (await _db.ExecuteAsync(FT.CONFIG, "SET", option, value)).OKtoBoolean();
}
- ///
- /// Create an index with the given specification.
- ///
- /// The index name.
- /// Command's parameters.
- /// The index schema.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool Create(string indexName, FTCreateParams parameters, Schema schema)
{
var args = new List() { indexName };
@@ -274,14 +175,7 @@ public bool Create(string indexName, FTCreateParams parameters, Schema schema)
return _db.Execute(FT.CREATE, args).OKtoBoolean();
}
- ///
- /// Create an index with the given specification.
- ///
- /// The index name.
- /// Command's parameters.
- /// The index schema.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task CreateAsync(string indexName, FTCreateParams parameters, Schema schema)
{
var args = new List() { indexName };
@@ -294,38 +188,19 @@ public async Task CreateAsync(string indexName, FTCreateParams parameters,
return (await _db.ExecuteAsync(FT.CREATE, args)).OKtoBoolean();
}
- ///
- /// Delete a cursor from the index.
- ///
- /// The index name
- /// The cursor's ID.
- /// if it has been deleted, if it did not exist.
- ///
+ ///
public bool CursorDel(string indexName, long cursorId)
{
return _db.Execute(FT.CURSOR, "DEL", indexName, cursorId).OKtoBoolean();
}
- ///
- /// Delete a cursor from the index.
- ///
- /// The index name
- /// The cursor's ID.
- /// if it has been deleted, if it did not exist.
- ///
+ ///
public async Task CursorDelAsync(string indexName, long cursorId)
{
return (await _db.ExecuteAsync(FT.CURSOR, "DEL", indexName, cursorId)).OKtoBoolean();
}
- ///
- /// Read next results from an existing cursor.
- ///
- /// The index name
- /// The cursor's ID.
- /// Limit the amount of returned results.
- /// A AggregationResult object with the results
- ///
+ ///
public AggregationResult CursorRead(string indexName, long cursorId, int? count = null)
{
RedisResult[] resp = ((count == null) ? _db.Execute(FT.CURSOR, "READ", indexName, cursorId)
@@ -335,14 +210,7 @@ public AggregationResult CursorRead(string indexName, long cursorId, int? count
return new AggregationResult(resp[0], (long)resp[1]);
}
- ///
- /// Read next results from an existing cursor.
- ///
- /// The index name
- /// The cursor's ID.
- /// Limit the amount of returned results.
- /// A AggregationResult object with the results
- ///
+ ///
public async Task CursorReadAsync(string indexName, long cursorId, int? count = null)
{
RedisResult[] resp = (await ((count == null) ? _db.ExecuteAsync(FT.CURSOR, "READ", indexName, cursorId)
@@ -352,13 +220,7 @@ public async Task CursorReadAsync(string indexName, long curs
return new AggregationResult(resp[0], (long)resp[1]);
}
- ///
- /// Add terms to a dictionary.
- ///
- /// The dictionary name
+ /// Terms to add to the dictionary.
- /// The number of new terms that were added.
- ///
+ ///
public long DictAdd(string dict, params string[] terms)
{
if (terms.Length < 1)
@@ -375,13 +237,7 @@ public long DictAdd(string dict, params string[] terms)
return _db.Execute(FT.DICTADD, args).ToLong();
}
- ///
- /// Add terms to a dictionary.
- ///
- /// The dictionary name
+ /// Terms to add to the dictionary.
- /// The number of new terms that were added.
- ///
+ ///
public async Task DictAddAsync(string dict, params string[] terms)
{
if (terms.Length < 1)
@@ -398,13 +254,7 @@ public async Task DictAddAsync(string dict, params string[] terms)
return (await _db.ExecuteAsync(FT.DICTADD, args)).ToLong();
}
- ///
- /// Delete terms from a dictionary.
- ///
- /// The dictionary name
+ /// Terms to delete from the dictionary.
- /// The number of new terms that were deleted.
- ///
+ ///
public long DictDel(string dict, params string[] terms)
{
if (terms.Length < 1)
@@ -421,13 +271,7 @@ public long DictDel(string dict, params string[] terms)
return _db.Execute(FT.DICTDEL, args).ToLong();
}
- ///
- /// Delete terms from a dictionary.
- ///
- /// The dictionary name
+ /// Terms to delete from the dictionary.
- /// The number of new terms that were deleted.
- ///
+ ///
public async Task DictDelAsync(string dict, params string[] terms)
{
if (terms.Length < 1)
@@ -444,35 +288,19 @@ public async Task DictDelAsync(string dict, params string[] terms)
return (await _db.ExecuteAsync(FT.DICTDEL, args)).ToLong();
}
- ///
- /// Dump all terms in the given dictionary.
- ///
- /// The dictionary name
- /// An array, where each element is term.
- ///
+ ///
public RedisResult[] DictDump(string dict)
{
return _db.Execute(FT.DICTDUMP, dict).ToArray();
}
- ///
- /// Dump all terms in the given dictionary.
- ///
- /// The dictionary name
- /// An array, where each element is term.
- ///
+ ///
public async Task DictDumpAsync(string dict)
{
return (await _db.ExecuteAsync(FT.DICTDUMP, dict)).ToArray();
}
- ///
- /// Delete an index.
- ///
- /// The index name
- /// If set, deletes the actual document hashes.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool DropIndex(string indexName, bool dd = false)
{
return ((dd) ? _db.Execute(FT.DROPINDEX, indexName, "DD")
@@ -480,13 +308,7 @@ public bool DropIndex(string indexName, bool dd = false)
.OKtoBoolean();
}
- ///
- /// Delete an index.
- ///
- /// The index name
- /// If set, deletes the actual document hashes.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task DropIndexAsync(string indexName, bool dd = false)
{
return (await ((dd) ? _db.ExecuteAsync(FT.DROPINDEX, indexName, "DD")
@@ -494,13 +316,7 @@ public async Task DropIndexAsync(string indexName, bool dd = false)
.OKtoBoolean();
}
- ///
- /// Return the execution plan for a complex query
- ///
- /// The index name
- /// The query to explain
- /// String that representing the execution plan
- ///
+ ///
public string Explain(string indexName, Query q)
{
var args = new List { indexName };
@@ -508,13 +324,7 @@ public string Explain(string indexName, Query q)
return _db.Execute(FT.EXPLAIN, args).ToString();
}
- ///
- /// Return the execution plan for a complex query
- ///
- /// The index name
- /// The query to explain
- /// String that representing the execution plan
- ///
+ ///
public async Task ExplainAsync(string indexName, Query q)
{
var args = new List { indexName };
@@ -522,13 +332,7 @@ public async Task ExplainAsync(string indexName, Query q)
return (await _db.ExecuteAsync(FT.EXPLAIN, args)).ToString();
}
- ///
- /// Return the execution plan for a complex query
- ///
- /// The index name
- /// The query to explain
- /// An array reply with a string representing the execution plan
- ///
+ ///
public RedisResult[] ExplainCli(string indexName, Query q)
{
var args = new List { indexName };
@@ -536,13 +340,7 @@ public RedisResult[] ExplainCli(string indexName, Query q)
return _db.Execute(FT.EXPLAINCLI, args).ToArray();
}
- ///
- /// Return the execution plan for a complex query
- ///
- /// The index name
- /// The query to explain
- /// An array reply with a string representing the execution plan
- ///
+ ///
public async Task ExplainCliAsync(string indexName, Query q)
{
var args = new List { indexName };
@@ -550,44 +348,17 @@ public async Task ExplainCliAsync(string indexName, Query q)
return (await _db.ExecuteAsync(FT.EXPLAINCLI, args)).ToArray();
}
- // ///
- // /// Return information and statistics on the index.
- // ///
- // /// The name of the index.
- // /// Dictionary of key and value with information about the index
- // ///
- // public Dictionary Info(RedisValue index)
- // {
- // return _db.Execute(FT.INFO, index).ToFtInfoAsDictionary();
- // }
-
- ///
- /// Return information and statistics on the index.
- ///
- /// The name of the index.
- /// Dictionary of key and value with information about the index
- ///
+ ///
public InfoResult Info(RedisValue index) =>
- new InfoResult(_db.Execute("FT.INFO", index));
-
- ///
- /// Return information and statistics on the index.
- ///
- /// The name of the index.
- /// Dictionary of key and value with information about the index
- ///
+ new InfoResult(_db.Execute("FT.INFO", index));
+
+ ///
public async Task InfoAsync(RedisValue index) =>
- new InfoResult(await _db.ExecuteAsync("FT.INFO", index));
+ new InfoResult(await _db.ExecuteAsync("FT.INFO", index));
// TODO: FT.PROFILE (jedis doesn't have it)
- ///
- /// Search the index
- ///
- /// The index name
- /// a object with the query string and optional parameters
- /// a object with the results
- ///
+ ///
public SearchResult Search(string indexName, Query q)
{
var args = new List { indexName };
@@ -597,13 +368,7 @@ public SearchResult Search(string indexName, Query q)
return new SearchResult(resp, !q.NoContent, q.WithScores, q.WithPayloads, q.ExplainScore);
}
- ///
- /// Search the index
- ///
- /// The index name
- /// a object with the query string and optional parameters
- /// a object with the results
- ///
+ ///
public async Task SearchAsync(string indexName, Query q)
{
var args = new List { indexName };
@@ -612,12 +377,7 @@ public async Task SearchAsync(string indexName, Query q)
return new SearchResult(resp, !q.NoContent, q.WithScores, q.WithPayloads, q.ExplainScore);
}
- ///
- /// Dump the contents of a synonym group.
- ///
- /// The index name
- /// Pairs of term and an array of synonym groups.
- ///
+ ///
public Dictionary> SynDump(string indexName)
{
var resp = _db.Execute(FT.SYNDUMP, indexName).ToArray();
@@ -633,12 +393,7 @@ public Dictionary> SynDump(string indexName)
// TODO: FT.SPELLCHECK (jedis doesn't have it)
- ///
- /// Dump the contents of a synonym group.
- ///
- /// The index name
- /// Pairs of term and an array of synonym groups.
- ///
+ ///
public async Task>> SynDumpAsync(string indexName)
{
var resp = (await _db.ExecuteAsync(FT.SYNDUMP, indexName)).ToArray();
@@ -652,16 +407,7 @@ public async Task>> SynDumpAsync(string indexNam
return result;
}
- ///
- /// Update a synonym group.
- ///
- /// The index name
- /// Is synonym group to return
- /// does not scan and index, and only documents
- /// that are indexed after the update are affected
- /// The terms
- /// Pairs of term and an array of synonym groups.
- ///
+ ///
public bool SynUpdate(string indexName, string synonymGroupId, bool skipInitialScan = false, params string[] terms)
{
if (terms.Length < 1)
@@ -674,16 +420,7 @@ public bool SynUpdate(string indexName, string synonymGroupId, bool skipInitialS
return _db.Execute(FT.SYNUPDATE, args).OKtoBoolean();
}
- ///
- /// Update a synonym group.
- ///
- /// The index name
- /// Is synonym group to return
- /// does not scan and index, and only documents
- /// that are indexed after the update are affected
- /// The terms
- /// Pairs of term and an array of synonym groups.
- ///
+ ///
public async Task SynUpdateAsync(string indexName, string synonymGroupId, bool skipInitialScan = false, params string[] terms)
{
if (terms.Length < 1)
@@ -696,24 +433,12 @@ public async Task SynUpdateAsync(string indexName, string synonymGroupId,
return (await _db.ExecuteAsync(FT.SYNUPDATE, args)).OKtoBoolean();
}
- ///
- /// Return a distinct set of values indexed in a Tag field.
- ///
- /// The index name
- /// TAG field name
- /// List of TAG field values
- ///
+ ///
public RedisResult[] TagVals(string indexName, string fieldName) => //TODO: consider return Set
- _db.Execute(FT.TAGVALS, indexName, fieldName).ToArray();
-
- ///
- /// Return a distinct set of values indexed in a Tag field.
- ///
- /// The index name
- /// TAG field name
- /// List of TAG field values
- ///
+ _db.Execute(FT.TAGVALS, indexName, fieldName).ToArray();
+
+ ///
public async Task TagValsAsync(string indexName, string fieldName) => //TODO: consider return Set
- (await _db.ExecuteAsync(FT.TAGVALS, indexName, fieldName)).ToArray();
+ (await _db.ExecuteAsync(FT.TAGVALS, indexName, fieldName)).ToArray();
}
}
\ No newline at end of file
diff --git a/src/NRedisStack/Tdigest/ITdigestCommands.cs b/src/NRedisStack/Tdigest/ITdigestCommands.cs
new file mode 100644
index 00000000..bec47178
--- /dev/null
+++ b/src/NRedisStack/Tdigest/ITdigestCommands.cs
@@ -0,0 +1,265 @@
+using NRedisStack.Tdigest.DataTypes;
+using StackExchange.Redis;
+namespace NRedisStack
+{
+
+ public interface ITdigestCommands
+ {
+
+ ///
+ /// Adds one or more observations to a t-digest sketch.
+ ///
+ /// The name of the sketch.
+ /// The value of the observation.
+ /// if executed correctly, error otherwise
+ ///
+ bool Add(RedisKey key, params double[] values);
+
+ ///
+ /// Adds one or more observations to a t-digest sketch.
+ ///
+ /// The name of the sketch.
+ /// The value of the observation.
+ /// if executed correctly, error otherwise
+ ///
+ Task AddAsync(RedisKey key, params double[] values);
+
+ ///
+ /// Estimate the fraction of all observations added which are <= value.
+ ///
+ /// The name of the sketch.
+ /// upper limit of observation value.
+ /// double-reply - estimation of the fraction of all observations added which are <= value
+ ///
+ double[] CDF(RedisKey key, params double[] values);
+
+ ///
+ /// Estimate the fraction of all observations added which are <= value.
+ ///
+ /// The name of the sketch.
+ /// upper limit of observation value.
+ /// double-reply - estimation of the fraction of all observations added which are <= value
+ ///
+ Task CDFAsync(RedisKey key, params double[] values);
+
+ ///
+ /// Allocate memory and initialize a t-digest sketch.
+ ///
+ /// The name of the sketch.
+ /// The compression parameter.
+ /// if executed correctly, error otherwise
+ ///
+ bool Create(RedisKey key, long compression = 100);
+
+ ///
+ /// Allocate memory and initialize a t-digest sketch.
+ ///
+ /// The name of the sketch.
+ /// The compression parameter.
+ /// if executed correctly, error otherwise
+ ///
+ Task CreateAsync(RedisKey key, long compression = 100);
+
+ ///
+ /// Returns information about a sketch.
+ ///
+ /// The name of the sketch.
+ /// information about a sketch
+ ///
+ TdigestInformation Info(RedisKey key);
+
+ ///
+ /// Returns information about a sketch.
+ ///
+ /// The name of the sketch.
+ /// information about a sketch
+ ///
+ Task InfoAsync(RedisKey key);
+
+
+ ///
+ /// Get the maximum observation value from the sketch.
+ ///
+ /// The name of the sketch.
+ /// the maximum observation value from the sketch
+ ///
+ double Max(RedisKey key);
+
+ ///
+ /// Get the maximum observation value from the sketch.
+ ///
+ /// The name of the sketch.
+ /// the maximum observation value from the sketch
+ ///
+ Task MaxAsync(RedisKey key);
+
+ ///
+ /// Get the minimum observation value from the sketch.
+ ///
+ /// The name of the sketch.
+ /// the minimum observation value from the sketch
+ ///
+ double Min(RedisKey key);
+
+ ///
+ /// Get the minimum observation value from the sketch.
+ ///
+ /// The name of the sketch.
+ /// the minimum observation value from the sketch
+ ///
+ Task MinAsync(RedisKey key);
+
+ ///
+ /// Merges all of the values from 'from' keys to 'destination-key' sketch
+ ///
+ /// Sketch to copy observation values to (a t-digest data structure).
+ /// The compression parameter.
+ /// If destination already exists, it is overwritten.
+ /// Sketch to copy observation values from (a t-digest data structure).
+ /// if executed correctly, error otherwise
+ ///
+ bool Merge(RedisKey destinationKey, long compression = default(long), bool overide = false, params RedisKey[] sourceKeys);
+
+ ///
+ /// Merges all of the values from 'from' keys to 'destination-key' sketch
+ ///
+ /// Sketch to copy observation values to (a t-digest data structure).
+ /// The compression parameter.
+ /// If destination already exists, it is overwritten.
+ /// Sketch to copy observation values from (a t-digest data structure).
+ /// if executed correctly, error otherwise
+ ///
+ Task MergeAsync(RedisKey destinationKey, long compression = default(long), bool overide = false, params RedisKey[] sourceKeys);
+
+ ///
+ /// Returns estimates of one or more cutoffs such that a specified fraction of the observations
+ /// added to this t-digest would be less than or equal to each of the specified cutoffs.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// The desired fraction (between 0 and 1 inclusively).
+ /// An array of results populated with quantile_1, cutoff_1, quantile_2, cutoff_2, ..., quantile_N, cutoff_N.
+ ///
+ double[] Quantile(RedisKey key, params double[] quantile);
+
+ ///
+ /// Returns estimates of one or more cutoffs such that a specified fraction of the observations
+ /// added to this t-digest would be less than or equal to each of the specified cutoffs.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// The desired fraction (between 0 and 1 inclusively).
+ /// An array of results populated with quantile_1, cutoff_1, quantile_2, cutoff_2, ..., quantile_N, cutoff_N.
+ ///
+ Task QuantileAsync(RedisKey key, params double[] quantile);
+ ///
+ /// Retrieve the estimated rank of value (the number of observations in the sketch
+ /// that are smaller than value + half the number of observations that are equal to value).
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input value, for which the rank will be determined.
+ /// an array of results populated with rank_1, rank_2, ..., rank_N.
+ ///
+ long[] Rank(RedisKey key, params long[] values);
+
+ ///
+ /// Retrieve the estimated rank of value (the number of observations in the sketch
+ /// that are smaller than value + half the number of observations that are equal to value).
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input value, for which the rank will be determined.
+ /// an array of results populated with rank_1, rank_2, ..., rank_N.
+ ///
+ Task RankAsync(RedisKey key, params long[] values);
+
+ ///
+ /// Retrieve the estimated rank of value (the number of observations in the sketch
+ /// that are larger than value + half the number of observations that are equal to value).
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input value, for which the rank will be determined.
+ /// an array of results populated with rank_1, rank_2, ..., rank_N.
+ ///
+ long[] RevRank(RedisKey key, params long[] values);
+
+ ///
+ /// Retrieve the estimated rank of value (the number of observations in the sketch
+ /// that are larger than value + half the number of observations that are equal to value).
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input value, for which the rank will be determined.
+ /// an array of results populated with rank_1, rank_2, ..., rank_N.
+ ///
+ Task RevRankAsync(RedisKey key, params long[] values);
+
+ ///
+ /// Retrieve an estimation of the value with the given rank.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input rank, for which the value will be determined.
+ /// an array of results populated with value_1, value_2, ..., value_N.
+ ///
+ double[] ByRank(RedisKey key, params long[] ranks);
+
+ ///
+ /// Retrieve an estimation of the value with the given rank.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input rank, for which the value will be determined.
+ /// an array of results populated with value_1, value_2, ..., value_N.
+ ///
+ Task ByRankAsync(RedisKey key, params long[] ranks);
+
+ ///
+ /// Retrieve an estimation of the value with the given reverse rank.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input reverse rank, for which the value will be determined.
+ /// an array of results populated with value_1, value_2, ..., value_N.
+ ///
+ double[] ByRevRank(RedisKey key, params long[] ranks);
+
+ ///
+ /// Retrieve an estimation of the value with the given reverse rank.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// input reverse rank, for which the value will be determined.
+ /// an array of results populated with value_1, value_2, ..., value_N.
+ ///
+ Task ByRevRankAsync(RedisKey key, params long[] ranks);
+
+ ///
+ /// Reset the sketch - empty the sketch and re-initialize it
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// if executed correctly, error otherwise.
+ ///
+ bool Reset(RedisKey key);
+
+ ///
+ /// Reset the sketch - empty the sketch and re-initialize it
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// if executed correctly, error otherwise.
+ ///
+ Task ResetAsync(RedisKey key);
+
+ ///
+ /// Estimate the mean value from the sketch, excluding observation values outside the low and high cutoff quantiles.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// Exclude observation values lower than this quantile.
+ /// Exclude observation values higher than this quantile.
+ /// estimation of the mean value. Will return NaN if the sketch is empty.
+ ///
+ double TrimmedMean(RedisKey key, double lowCutQuantile, double highCutQuantile);
+
+ ///
+ /// Estimate the mean value from the sketch, excluding observation values outside the low and high cutoff quantiles.
+ ///
+ /// The name of the sketch (a t-digest data structure).
+ /// Exclude observation values lower than this quantile.
+ /// Exclude observation values higher than this quantile.
+ /// estimation of the mean value. Will return NaN if the sketch is empty.
+ ///
+ Task TrimmedMeanAsync(RedisKey key, double lowCutQuantile, double highCutQuantile);
+ }
+}
\ No newline at end of file
diff --git a/src/NRedisStack/Tdigest/TdigestCommands.cs b/src/NRedisStack/Tdigest/TdigestCommands.cs
index ab02de32..b71f8265 100644
--- a/src/NRedisStack/Tdigest/TdigestCommands.cs
+++ b/src/NRedisStack/Tdigest/TdigestCommands.cs
@@ -4,7 +4,7 @@
namespace NRedisStack
{
- public class TdigestCommands
+ public class TdigestCommands : ITdigestCommands
{
IDatabase _db;
public TdigestCommands(IDatabase db)
@@ -12,13 +12,7 @@ public TdigestCommands(IDatabase db)
_db = db;
}
- ///
- /// Adds one or more observations to a t-digest sketch.
- ///
- /// The name of the sketch.
- /// The value of the observation.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool Add(RedisKey key, params double[] values)
{
if (values.Length < 0) throw new ArgumentOutOfRangeException(nameof(values));
@@ -32,13 +26,7 @@ public bool Add(RedisKey key, params double[] values)
return _db.Execute(TDIGEST.ADD, args).OKtoBoolean();
}
- ///
- /// Adds one or more observations to a t-digest sketch.
- ///
- /// The name of the sketch.
- /// The value of the observation.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task AddAsync(RedisKey key, params double[] values)
{
if (values.Length < 0) throw new ArgumentOutOfRangeException(nameof(values));
@@ -52,220 +40,81 @@ public async Task AddAsync(RedisKey key, params double[] values)
return (await _db.ExecuteAsync(TDIGEST.ADD, args)).OKtoBoolean();
}
- // ///
- // /// Adds one or more observations to a t-digest sketch.
- // ///
- // /// The name of the sketch.
- // /// The value of the observation.
- // /// The weight of this observation.
- // /// if executed correctly, error otherwise
- // ///
- // public bool Add(RedisKey key, double item, long weight)
- // {
- // if (weight < 0) throw new ArgumentOutOfRangeException(nameof(weight));
-
- // return _db.Execute(TDIGEST.ADD, key, item, weight).OKtoBoolean();
- // }
-
- // ///
- // /// Adds one or more observations to a t-digest sketch.
- // ///
- // /// The name of the sketch.
- // /// The value of the observation.
- // /// The weight of this observation.
- // /// if executed correctly, error otherwise
- // ///
- // public async Task AddAsync(RedisKey key, double item, int weight)
- // {
- // if (weight < 0) throw new ArgumentOutOfRangeException(nameof(weight));
-
- // var result = await _db.ExecuteAsync(TDIGEST.ADD, key, item, weight);
- // return result.OKtoBoolean();
- // }
-
- // ///
- // /// Adds one or more observations to a t-digest sketch.
- // ///
- // /// The name of the sketch.
- // /// Tuple of the value of the observation and The weight of this observation.
- // /// if executed correctly, error otherwise
- // ///
- // public bool Add(RedisKey key, params Tuple[] valueWeight)
- // {
- // if (valueWeight.Length < 1)
- // throw new ArgumentOutOfRangeException(nameof(valueWeight));
-
- // var args = new List { key };
-
- // foreach (var pair in valueWeight)
- // {
- // if (pair.Item2 < 0) throw new ArgumentOutOfRangeException(nameof(pair.Item2));
- // args.Add(pair.Item1);
- // args.Add(pair.Item2);
- // }
- // return _db.Execute(TDIGEST.ADD, args).OKtoBoolean();
- // }
-
- // ///
- // /// Adds one or more observations to a t-digest sketch.
- // ///
- // /// The name of the sketch.
- // /// Tuple of the value of the observation and The weight of this observation.
- // /// if executed correctly, error otherwise
- // ///
- // public async Task AddAsync(RedisKey key, params Tuple[] valueWeight)
- // {
- // if (valueWeight.Length < 1)
- // throw new ArgumentOutOfRangeException(nameof(valueWeight));
-
- // var args = new List { key };
-
- // foreach (var pair in valueWeight)
- // {
- // if (pair.Item2 < 0) throw new ArgumentOutOfRangeException(nameof(pair.Item2));
- // args.Add(pair.Item1);
- // args.Add(pair.Item2);
- // }
- // return (await _db.ExecuteAsync(TDIGEST.ADD, args)).OKtoBoolean();
- // }
-
- ///
- /// Estimate the fraction of all observations added which are <= value.
- ///
- /// The name of the sketch.
- /// upper limit of observation value.
- /// double-reply - estimation of the fraction of all observations added which are <= value
- ///
+ ///
public double[] CDF(RedisKey key, params double[] values)
{
- var args = new List(values.Length +1) { key };
- foreach(var value in values) args.Add(value);
+ var args = new List(values.Length + 1) { key };
+ foreach (var value in values) args.Add(value);
return _db.Execute(TDIGEST.CDF, args).ToDoubleArray();
}
- ///
- /// Estimate the fraction of all observations added which are <= value.
- ///
- /// The name of the sketch.
- /// upper limit of observation value.
- /// double-reply - estimation of the fraction of all observations added which are <= value
- ///
+ ///
public async Task CDFAsync(RedisKey key, params double[] values)
{
- var args = new List(values.Length +1) { key };
- foreach(var value in values) args.Add(value);
+ var args = new List(values.Length + 1) { key };
+ foreach (var value in values) args.Add(value);
return (await _db.ExecuteAsync(TDIGEST.CDF, args)).ToDoubleArray();
}
- ///
- /// Allocate memory and initialize a t-digest sketch.
- ///
- /// The name of the sketch.
- /// The compression parameter.
- /// if executed correctly, error otherwise
- ///
+ ///
public bool Create(RedisKey key, long compression = 100)
{
return _db.Execute(TDIGEST.CREATE, key, TdigestArgs.COMPRESSION, compression).OKtoBoolean();
}
- ///
- /// Allocate memory and initialize a t-digest sketch.
- ///
- /// The name of the sketch.
- /// The compression parameter.
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task CreateAsync(RedisKey key, long compression = 100)
{
return (await _db.ExecuteAsync(TDIGEST.CREATE, key, TdigestArgs.COMPRESSION, compression)).OKtoBoolean();
}
- ///
- /// Returns information about a sketch.
- ///
- /// The name of the sketch.
- /// information about a sketch
- ///
+ ///
public TdigestInformation Info(RedisKey key)
{
return _db.Execute(TDIGEST.INFO, key).ToTdigestInfo();
}
- ///
- /// Returns information about a sketch.
- ///
- /// The name of the sketch.
- /// information about a sketch
- ///
+ ///
public async Task InfoAsync(RedisKey key)
{
return (await _db.ExecuteAsync(TDIGEST.INFO, key)).ToTdigestInfo();
}
- ///
- /// Get the maximum observation value from the sketch.
- ///
- /// The name of the sketch.
- /// the maximum observation value from the sketch
- ///
+ ///
public double Max(RedisKey key)
{
var result = _db.Execute(TDIGEST.MAX, key);
return result.ToDouble();
}
- ///
- /// Get the maximum observation value from the sketch.
- ///
- /// The name of the sketch.
- /// the maximum observation value from the sketch
- ///
+ ///
public async Task MaxAsync(RedisKey key)
{
var result = await _db.ExecuteAsync(TDIGEST.MAX, key);
return result.ToDouble();
}
- ///
- /// Get the minimum observation value from the sketch.
- ///
- /// The name of the sketch.
- /// the minimum observation value from the sketch
- ///
+ ///
public double Min(RedisKey key)
{
return _db.Execute(TDIGEST.MIN, key).ToDouble();
}
- ///
- /// Get the minimum observation value from the sketch.
- ///
- /// The name of the sketch.
- /// the minimum observation value from the sketch
- ///
+ ///
public async Task MinAsync(RedisKey key)
{
return (await _db.ExecuteAsync(TDIGEST.MIN, key)).ToDouble();
}
- ///
- /// Merges all of the values from 'from' keys to 'destination-key' sketch
- ///
- /// TSketch to copy observation values to (a t-digest data structure).
- /// The compression parameter.
- /// If destination already exists, it is overwritten.
- /// Sketch to copy observation values from (a t-digest data structure).
- /// if executed correctly, error otherwise
- ///
+ ///
public bool Merge(RedisKey destinationKey, long compression = default(long), bool overide = false, params RedisKey[] sourceKeys)
{
if (sourceKeys.Length < 1) throw new ArgumentOutOfRangeException(nameof(sourceKeys));
int numkeys = sourceKeys.Length;
- var args = new List() { destinationKey, numkeys};
- foreach(var key in sourceKeys)
+ var args = new List() { destinationKey, numkeys };
+ foreach (var key in sourceKeys)
{
args.Add(key);
}
@@ -276,7 +125,7 @@ public async Task MinAsync(RedisKey key)
args.Add(compression);
}
- if(overide)
+ if (overide)
{
args.Add("OVERRIDE");
}
@@ -284,22 +133,14 @@ public async Task MinAsync(RedisKey key)
return _db.Execute(TDIGEST.MERGE, args).OKtoBoolean();
}
- ///
- /// Merges all of the values from 'from' keys to 'destination-key' sketch
- ///
- /// TSketch to copy observation values to (a t-digest data structure).
- /// The compression parameter.
- /// If destination already exists, it is overwritten.
- /// Sketch to copy observation values from (a t-digest data structure).
- /// if executed correctly, error otherwise
- ///
+ ///
public async Task MergeAsync(RedisKey destinationKey, long compression = default(long), bool overide = false, params RedisKey[] sourceKeys)
{
if (sourceKeys.Length < 1) throw new ArgumentOutOfRangeException(nameof(sourceKeys));
int numkeys = sourceKeys.Length;
- var args = new List() { destinationKey, numkeys};
- foreach(var key in sourceKeys)
+ var args = new List() { destinationKey, numkeys };
+ foreach (var key in sourceKeys)
{
args.Add(key);
}
@@ -310,7 +151,7 @@ public async Task MinAsync(RedisKey key)
args.Add(compression);
}
- if(overide)
+ if (overide)
{
args.Add("OVERRIDE");
}
@@ -318,14 +159,7 @@ public async Task MinAsync(RedisKey key)
return (await _db.ExecuteAsync(TDIGEST.MERGE, args)).OKtoBoolean();
}
- ///
- /// Returns estimates of one or more cutoffs such that a specified fraction of the observations
- /// added to this t-digest would be less than or equal to each of the specified cutoffs.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// The desired fraction (between 0 and 1 inclusively).
- /// An array of results populated with quantile_1, cutoff_1, quantile_2, cutoff_2, ..., quantile_N, cutoff_N.
- ///
+ ///
public double[] Quantile(RedisKey key, params double[] quantile)
{
if (quantile.Length < 1) throw new ArgumentOutOfRangeException(nameof(quantile));
@@ -336,14 +170,8 @@ public double[] Quantile(RedisKey key, params double[] quantile)
return _db.Execute(TDIGEST.QUANTILE, args).ToDoubleArray();
}
- ///
- /// Returns estimates of one or more cutoffs such that a specified fraction of the observations
///added to this t-digest would be less than or equal to each of the specified cutoffs.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// The desired fraction (between 0 and 1 inclusively).
- /// An array of results populated with quantile_1, cutoff_1, quantile_2, cutoff_2, ..., quantile_N, cutoff_N.
- ///
+ ///
public async Task QuantileAsync(RedisKey key, params double[] quantile)
{
if (quantile.Length < 1) throw new ArgumentOutOfRangeException(nameof(quantile));
@@ -354,14 +182,7 @@ public async Task QuantileAsync(RedisKey key, params double[] quantile
return (await _db.ExecuteAsync(TDIGEST.QUANTILE, args)).ToDoubleArray();
}
- ///
- /// Retrieve the estimated rank of value (the number of observations in the sketch
- /// that are smaller than value + half the number of observations that are equal to value).
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input value, for which the rank will be determined.
- /// an array of results populated with rank_1, rank_2, ..., rank_N.
- ///
+ ///
public long[] Rank(RedisKey key, params long[] values)
{
if (values.Length < 1) throw new ArgumentOutOfRangeException(nameof(values));
@@ -371,14 +192,7 @@ public long[] Rank(RedisKey key, params long[] values)
return _db.Execute(TDIGEST.RANK, args).ToLongArray();
}
- ///
- /// Retrieve the estimated rank of value (the number of observations in the sketch
- /// that are smaller than value + half the number of observations that are equal to value).
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input value, for which the rank will be determined.
- /// an array of results populated with rank_1, rank_2, ..., rank_N.
- ///
+ ///
public async Task RankAsync(RedisKey key, params long[] values)
{
if (values.Length < 1) throw new ArgumentOutOfRangeException(nameof(values));
@@ -388,14 +202,7 @@ public async Task RankAsync(RedisKey key, params long[] values)
return (await _db.ExecuteAsync(TDIGEST.RANK, args)).ToLongArray();
}
- ///
- /// Retrieve the estimated rank of value (the number of observations in the sketch
- /// that are larger than value + half the number of observations that are equal to value).
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input value, for which the rank will be determined.
- /// an array of results populated with rank_1, rank_2, ..., rank_N.
- ///
+ ///
public long[] RevRank(RedisKey key, params long[] values)
{
if (values.Length < 1) throw new ArgumentOutOfRangeException(nameof(values));
@@ -405,30 +212,17 @@ public long[] RevRank(RedisKey key, params long[] values)
return _db.Execute(TDIGEST.REVRANK, args).ToLongArray();
}
- ///
- /// Retrieve the estimated rank of value (the number of observations in the sketch
- /// that are larger than value + half the number of observations that are equal to value).
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input value, for which the rank will be determined.
- /// an array of results populated with rank_1, rank_2, ..., rank_N.
- ///
+ ///
public async Task RevRankAsync(RedisKey key, params long[] values)
{
if (values.Length < 1) throw new ArgumentOutOfRangeException(nameof(values));
var args = new List(values.Length + 1) { key };
foreach (var v in values) args.Add(v);
- return ( await _db.ExecuteAsync(TDIGEST.REVRANK, args)).ToLongArray();
+ return (await _db.ExecuteAsync(TDIGEST.REVRANK, args)).ToLongArray();
}
- ///
- /// Retrieve an estimation of the value with the given the rank.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input rank, for which the value will be determined.
- /// an array of results populated with value_1, value_2, ..., value_N.
- ///
+ ///
public double[] ByRank(RedisKey key, params long[] ranks)
{
if (ranks.Length < 1) throw new ArgumentOutOfRangeException(nameof(ranks));
@@ -438,13 +232,7 @@ public double[] ByRank(RedisKey key, params long[] ranks)
return _db.Execute(TDIGEST.BYRANK, args).ToDoubleArray();
}
- ///
- /// Retrieve an estimation of the value with the given the rank.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input rank, for which the value will be determined.
- /// an array of results populated with value_1, value_2, ..., value_N.
- ///
+ ///
public async Task ByRankAsync(RedisKey key, params long[] ranks)
{
if (ranks.Length < 1) throw new ArgumentOutOfRangeException(nameof(ranks));
@@ -454,13 +242,7 @@ public async Task ByRankAsync(RedisKey key, params long[] ranks)
return (await _db.ExecuteAsync(TDIGEST.BYRANK, args)).ToDoubleArray();
}
- ///
- /// Retrieve an estimation of the value with the given the reverse rank.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input reverse rank, for which the value will be determined.
- /// an array of results populated with value_1, value_2, ..., value_N.
- ///
+ ///
public double[] ByRevRank(RedisKey key, params long[] ranks)
{
if (ranks.Length < 1) throw new ArgumentOutOfRangeException(nameof(ranks));
@@ -470,65 +252,35 @@ public double[] ByRevRank(RedisKey key, params long[] ranks)
return _db.Execute(TDIGEST.BYREVRANK, args).ToDoubleArray();
}
- ///
- /// Retrieve an estimation of the value with the given the reverse rank.
- ///
- /// The name of the sketch (a t-digest data structure).
- /// input reverse rank, for which the value will be determined.
- /// an array of results populated with value_1, value_2, ..., value_N.
- ///
+ ///
public async Task ByRevRankAsync(RedisKey key, params long[] ranks)
{
if (ranks.Length < 1) throw new ArgumentOutOfRangeException(nameof(ranks));
var args = new List(ranks.Length + 1) { key };
foreach (var v in ranks) args.Add(v);
- return ( await _db.ExecuteAsync(TDIGEST.BYREVRANK, args)).ToDoubleArray();
+ return (await _db.ExecuteAsync(TDIGEST.BYREVRANK, args)).ToDoubleArray();
}
- ///
- /// Reset the sketch - empty the sketch and re-initialize it
- ///
- /// The name of the sketch (a t-digest data structure).
- /// if executed correctly, error otherwise.
- ///
+ ///
public bool Reset(RedisKey key)
{
return _db.Execute(TDIGEST.RESET, key).OKtoBoolean();
}
- ///
- /// Reset the sketch - empty the sketch and re-initialize it
- ///
- /// The name of the sketch (a t-digest data structure).
- /// if executed correctly, error otherwise.
- ///
+ ///
public async Task ResetAsync(RedisKey key)
{
return (await _db.ExecuteAsync(TDIGEST.RESET, key)).OKtoBoolean();
}
- ///
- /// Reset the sketch - empty the sketch and re-initialize it
- ///
- /// The name of the sketch (a t-digest data structure).
- /// Exclude observation values lower than this quantile.
- /// Exclude observation values higher than this quantile.
- /// estimation of the mean value. Will return NaN if the sketch is empty.
- ///
+ ///
public double TrimmedMean(RedisKey key, double lowCutQuantile, double highCutQuantile)
{
return _db.Execute(TDIGEST.TRIMMED_MEAN, key, lowCutQuantile, highCutQuantile).ToDouble();
}
- ///
- /// Reset the sketch - empty the sketch and re-initialize it
- ///
- /// The name of the sketch (a t-digest data structure).
- /// Exclude observation values lower than this quantile.
- /// Exclude observation values higher than this quantile.
- /// estimation of the mean value. Will return NaN if the sketch is empty.
- ///
+ ///
public async Task TrimmedMeanAsync(RedisKey key, double lowCutQuantile, double highCutQuantile)
{
return (await _db.ExecuteAsync(TDIGEST.TRIMMED_MEAN, key, lowCutQuantile, highCutQuantile)).ToDouble();
diff --git a/src/NRedisStack/TimeSeries/ITimeSeriesCommands.cs b/src/NRedisStack/TimeSeries/ITimeSeriesCommands.cs
new file mode 100644
index 00000000..61a20ac1
--- /dev/null
+++ b/src/NRedisStack/TimeSeries/ITimeSeriesCommands.cs
@@ -0,0 +1,634 @@
+using NRedisStack.Literals.Enums;
+using NRedisStack.DataTypes;
+namespace NRedisStack
+{
+ public interface ITimeSeriesCommands
+ {
+ #region Create
+
+ ///
+ /// Create a new time-series.
+ ///
+ /// Key name for timeseries
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: Define handling of duplicate samples behavior (available for RedisTimeseries >= 1.4)
+ /// If the operation executed successfully
+ ///
+ bool Create(string key, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null);
+
+ ///
+ /// Create a new time-series.
+ ///
+ /// Key name for timeseries
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: Define handling of duplicate samples behavior (available for RedisTimeseries >= 1.4)
+ /// If the operation executed successfully
+ ///
+ Task CreateAsync(string key, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null);
+
+ #endregion
+
+ #region Update
+
+ ///
+ /// Update the retention, labels of an existing key.
+ ///
+ /// Key name for timeseries
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: Define handling of duplicate samples behavior (available for RedisTimeseries >= 1.4)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// If the operation executed successfully
+ ///
+ bool Alter(string key, long? retentionTime = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null, IReadOnlyCollection? labels = null);
+
+ ///
+ /// Update the retention, labels of an existing key.
+ ///
+ /// Key name for timeseries
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: Define handling of duplicate samples behavior (available for RedisTimeseries >= 1.4)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// If the operation executed successfully
+ ///
+ Task AlterAsync(string key, long? retentionTime = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null, IReadOnlyCollection? labels = null);
+
+ ///
+ /// Append (or create and append) a new sample to the series.
+ ///
+ /// Key name for timeseries
+ /// TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Numeric data value of the sample.
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: overwrite key and database configuration for DUPLICATE_POLICY
+ /// The timestamp value of the new sample
+ ///
+ TimeStamp Add(string key, TimeStamp timestamp, double value, long? retentionTime = null,
+ IReadOnlyCollection labels = null, bool? uncompressed = null,
+ long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null);
+
+ ///
+ /// Append (or create and append) a new sample to the series.
+ ///
+ /// Key name for timeseries
+ /// TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Numeric data value of the sample.
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// Optional: overwrite key and database configuration for DUPLICATE_POLICY
+ /// The timestamp value of the new sample
+ ///
+ Task AddAsync(string key, TimeStamp timestamp, double value, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null);
+ ///
+ /// Append new samples to multiple series.
+ ///
+ /// An Collection of (key, timestamp, value) tuples
+ /// List of timestamps of the new samples
+ ///
+ IReadOnlyList MAdd(IReadOnlyCollection<(string key, TimeStamp timestamp, double value)> sequence);
+ ///
+ /// Append new samples to multiple series.
+ ///
+ /// An Collection of (key, timestamp, value) tuples
+ /// List of timestamps of the new samples
+ ///
+ Task> MAddAsync(IReadOnlyCollection<(string key, TimeStamp timestamp, double value)> sequence);
+
+ ///
+ /// Creates a new sample that increments the latest sample's value.
+ ///
+ /// Key name for timeseries
+ /// Delta to add
+ /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// The latest sample timestamp (updated sample)
+ ///
+ TimeStamp IncrBy(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null);
+
+ ///
+ /// Creates a new sample that increments the latest sample's value.
+ ///
+ /// Key name for timeseries
+ /// Delta to add
+ /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// The latest sample timestamp (updated sample)
+ ///
+ Task IncrByAsync(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null);
+
+ ///
+ /// Creates a new sample that decrements the latest sample's value.
+ ///
+ /// Key name for timeseries
+ /// Delta to subtract
+ /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// The latest sample timestamp (updated sample)
+ ///
+ TimeStamp DecrBy(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null);
+
+ ///
+ /// Creates a new sample that decrements the latest sample's value.
+ ///
+ /// Key name for timeseries
+ /// Delta to subtract
+ /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock)
+ /// Optional: Maximum age for samples compared to last event time (in milliseconds)
+ /// Optional: Collection of label-value pairs that represent metadata labels of the key
+ /// Optional: Adding this flag will keep data in an uncompressed form
+ /// Optional: Each time-series uses chunks of memory of fixed size for time series samples.
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes)
+ /// The latest sample timestamp (updated sample)
+ ///
+ Task DecrByAsync(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null);
+
+ ///
+ /// Delete data points for a given timeseries and interval range in the form of start and end delete timestamps.
+ /// The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted.
+ ///
+ /// Key name for timeseries
+ /// Start timestamp for the range deletion.
+ /// End timestamp for the range deletion.
+ /// The count of deleted items
+ ///
+ long Del(string key, TimeStamp fromTimeStamp, TimeStamp toTimeStamp);
+
+ ///
+ /// Delete data points for a given timeseries and interval range in the form of start and end delete timestamps.
+ /// The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted.
+ ///
+ /// Key name for timeseries
+ /// Start timestamp for the range deletion.
+ /// End timestamp for the range deletion.
+ /// The count of deleted items
+ ///
+ Task DelAsync(string key, TimeStamp fromTimeStamp, TimeStamp toTimeStamp);
+
+ #endregion
+
+ #region Aggregation, Compaction, Downsampling
+
+ ///
+ /// Create a compaction rule.
+ ///
+ /// Key name for source time series
+ /// TimeSeries rule:
+ /// Key name for destination time series, Aggregation type and Time bucket for aggregation in milliseconds
+ /// ensures that there is a bucket that starts
+ /// exactly at alignTimestamp and aligns all other buckets accordingly.
+ /// It is expressed in milliseconds. The default value is 0 aligned with the epoch
+ /// If the operation executed successfully
+ ///
+ bool CreateRule(string sourceKey, TimeSeriesRule rule, long alignTimestamp = 0);
+
+ ///
+ /// Create a compaction rule.
+ ///
+ /// Key name for source time series
+ /// TimeSeries rule:
+ /// Key name for destination time series, Aggregation type and Time bucket for aggregation in milliseconds
+ /// ensures that there is a bucket that starts
+ /// exactly at alignTimestamp and aligns all other buckets accordingly.
+ /// It is expressed in milliseconds. The default value is 0 aligned with the epoch
+ /// If the operation executed successfully
+ ///
+ Task CreateRuleAsync(string sourceKey, TimeSeriesRule rule, long alignTimestamp = 0);
+
+ ///
+ /// Deletes a compaction rule.
+ ///
+ /// Key name for source time series
+ /// Key name for destination time series
+ /// If the operation executed successfully
+ ///
+ bool DeleteRule(string sourceKey, string destKey);
+
+ ///
+ /// Deletes a compaction rule.
+ ///
+ /// Key name for source time series
+ /// Key name for destination time series
+ /// If the operation executed successfully
+ ///
+ Task DeleteRuleAsync(string sourceKey, string destKey);
+
+ #endregion
+
+ #region Query
+
+ ///
+ /// Get the last sample.
+ ///
+ /// Key name for timeseries
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// TimeSeriesTuple that represents the last sample. Null if the series is empty.
+ ///
+ TimeSeriesTuple? Get(string key, bool latest = false);
+
+ ///
+ /// Get the last sample.
+ ///
+ /// Key name for timeseries
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// TimeSeriesTuple that represents the last sample. Null if the series is empty.
+ ///
+ Task GetAsync(string key, bool latest = false);
+
+ ///
+ /// Get the last samples matching the specific filter.
+ ///
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// A sequence of filters
+ /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series
+ /// Optional: returns a subset of the label-value pairs that represent metadata labels of the time series
+ /// The command returns the last sample for entries with labels matching the specified filter.
+ ///
+ IReadOnlyList<(string key, IReadOnlyList labels, TimeSeriesTuple value)> MGet(IReadOnlyCollection filter, bool latest = false,
+ bool? withLabels = null, IReadOnlyCollection? selectedLabels = null);
+
+ ///
+ /// Get the last samples matching the specific filter.
+ ///
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// A sequence of filters
+ /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series
+ /// Optional: returns a subset of the label-value pairs that represent metadata labels of the time series
+ /// The command returns the last sample for entries with labels matching the specified filter.
+ ///
+ Task labels, TimeSeriesTuple value)>> MGetAsync(IReadOnlyCollection filter, bool latest = false,
+ bool? withLabels = null, IReadOnlyCollection? selectedLabels = null);
+
+ ///
+ /// Query a range.
+ ///
+ /// Key name for timeseries
+ /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp.
+ /// End timestamp for range query, + can be used to express the maximum possible timestamp.
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// Optional: List of timestamps to filter the result by specific timestamps
+ /// Optional: Filter result by value using minimum and maximum
+ /// Optional: Returned list size.
+ /// Optional: Timestamp for alignment control for aggregation.
+ /// Optional: Aggregation type
+ /// Optional: Time bucket for aggregation in milliseconds
+ /// Optional: controls how bucket timestamps are reported.
+ /// Optional: when specified, reports aggregations also for empty buckets
+ /// A list of TimeSeriesTuple
+ ///
+ IReadOnlyList Range(string key,
+ TimeStamp fromTimeStamp,
+ TimeStamp toTimeStamp,
+ bool latest = false,
+ IReadOnlyCollection? filterByTs = null,
+ (long, long)? filterByValue = null,
+ long? count = null,
+ TimeStamp? align = null,
+ TsAggregation? aggregation = null,
+ long? timeBucket = null,
+ TsBucketTimestamps? bt = null,
+ bool empty = false);
+ ///
+ /// Query a range.
+ ///
+ /// Key name for timeseries
+ /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp.
+ /// End timestamp for range query, + can be used to express the maximum possible timestamp.
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// Optional: List of timestamps to filter the result by specific timestamps
+ /// Optional: Filter result by value using minimum and maximum
+ /// Optional: Returned list size.
+ /// Optional: Timestamp for alignment control for aggregation.
+ /// Optional: Aggregation type
+ /// Optional: Time bucket for aggregation in milliseconds
+ /// Optional: controls how bucket timestamps are reported.
+ /// Optional: when specified, reports aggregations also for empty buckets
+ /// A list of TimeSeriesTuple
+ ///
+ Task> RangeAsync(string key,
+ TimeStamp fromTimeStamp,
+ TimeStamp toTimeStamp,
+ bool latest = false,
+ IReadOnlyCollection? filterByTs = null,
+ (long, long)? filterByValue = null,
+ long? count = null,
+ TimeStamp? align = null,
+ TsAggregation? aggregation = null,
+ long? timeBucket = null,
+ TsBucketTimestamps? bt = null,
+ bool empty = false);
+
+        /// <summary>
+        /// Query a range in reverse direction.
+        /// </summary>
+        /// <param name="key">Key name for timeseries</param>
+        /// <param name="fromTimeStamp">Start timestamp for the range query. "-" can be used to express the minimum possible timestamp.</param>
+        /// <param name="toTimeStamp">End timestamp for range query, + can be used to express the maximum possible timestamp.</param>
+        /// <param name="latest">is used when a time series is a compaction. With LATEST, TS.REVRANGE also reports
+        /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+        /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.REVRANGE does not report the latest possibly partial bucket.
+        /// When a time series is not a compaction, LATEST is ignored.</param>
+        /// <param name="filterByTs">Optional: List of timestamps to filter the result by specific timestamps</param>
+        /// <param name="filterByValue">Optional: Filter result by value using minimum and maximum</param>
+        /// <param name="count">Optional: Returned list size.</param>
+        /// <param name="align">Optional: Timestamp for alignment control for aggregation.</param>
+        /// <param name="aggregation">Optional: Aggregation type</param>
+        /// <param name="timeBucket">Optional: Time bucket for aggregation in milliseconds</param>
+        /// <param name="bt">Optional: controls how bucket timestamps are reported.</param>
+        /// <param name="empty">Optional: when specified, reports aggregations also for empty buckets</param>
+        /// <returns>A list of <see cref="TimeSeriesTuple"/></returns>
+        /// <remarks><seealso href="https://redis.io/commands/ts.revrange"/></remarks>
+        IReadOnlyList<TimeSeriesTuple> RevRange(string key,
+            TimeStamp fromTimeStamp,
+            TimeStamp toTimeStamp,
+            bool latest = false,
+            IReadOnlyCollection<TimeStamp>? filterByTs = null,
+            (long, long)? filterByValue = null,
+            long? count = null,
+            TimeStamp? align = null,
+            TsAggregation? aggregation = null,
+            long? timeBucket = null,
+            TsBucketTimestamps? bt = null,
+            bool empty = false);
+
+        /// <summary>
+        /// Query a range in reverse direction.
+        /// </summary>
+        /// <param name="key">Key name for timeseries</param>
+        /// <param name="fromTimeStamp">Start timestamp for the range query. "-" can be used to express the minimum possible timestamp.</param>
+        /// <param name="toTimeStamp">End timestamp for range query, + can be used to express the maximum possible timestamp.</param>
+        /// <param name="latest">is used when a time series is a compaction. With LATEST, TS.REVRANGE also reports
+        /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+        /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.REVRANGE does not report the latest possibly partial bucket.
+        /// When a time series is not a compaction, LATEST is ignored.</param>
+        /// <param name="filterByTs">Optional: List of timestamps to filter the result by specific timestamps</param>
+        /// <param name="filterByValue">Optional: Filter result by value using minimum and maximum</param>
+        /// <param name="count">Optional: Returned list size.</param>
+        /// <param name="align">Optional: Timestamp for alignment control for aggregation.</param>
+        /// <param name="aggregation">Optional: Aggregation type</param>
+        /// <param name="timeBucket">Optional: Time bucket for aggregation in milliseconds</param>
+        /// <param name="bt">Optional: controls how bucket timestamps are reported.</param>
+        /// <param name="empty">Optional: when specified, reports aggregations also for empty buckets</param>
+        /// <returns>A list of <see cref="TimeSeriesTuple"/></returns>
+        /// <remarks><seealso href="https://redis.io/commands/ts.revrange"/></remarks>
+        Task<IReadOnlyList<TimeSeriesTuple>> RevRangeAsync(string key,
+            TimeStamp fromTimeStamp,
+            TimeStamp toTimeStamp,
+            bool latest = false,
+            IReadOnlyCollection<TimeStamp>? filterByTs = null,
+            (long, long)? filterByValue = null,
+            long? count = null,
+            TimeStamp? align = null,
+            TsAggregation? aggregation = null,
+            long? timeBucket = null,
+            TsBucketTimestamps? bt = null,
+            bool empty = false);
+
+ ///
+ /// Query a timestamp range across multiple time-series by filters.
+ ///
+ /// Start timestamp for the range query. - can be used to express the minimum possible timestamp.
+ /// End timestamp for range query, + can be used to express the maximum possible timestamp.
+ /// A sequence of filters
+ /// is used when a time series is a compaction. With LATEST, TS.MRANGE also reports
+ /// the compacted value of the latest possibly partial bucket, given that this bucket's start time falls
+ /// within [fromTimestamp, toTimestamp]. Without LATEST, TS.MRANGE does not report the latest possibly partial bucket.
+ /// When a time series is not a compaction, LATEST is ignored.
+ /// Optional: List of timestamps to filter the result by specific timestamps
+ /// Optional: Filter result by value using minimum and maximum
+ /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series
+ /// Optional: Include in the reply only a subset of the key-value pair labels of a series.
+ /// Optional: Maximum number of returned results per time-series.
+ /// Optional: Timestamp for alignment control for aggregation.
+ /// Optional: Aggregation type
+ /// Optional: Time bucket for aggregation in milliseconds
+ /// Optional: controls how bucket timestamps are reported.
+ /// Optional: when specified, reports aggregations also for empty buckets
+ /// Optional: Grouping by fields the results, and applying reducer functions on each group.
+ /// A list of (key, labels, values) tuples. Each tuple contains the key name, its labels and the values which satisfies the given range and filters.
+ ///
+ IReadOnlyList<(string key, IReadOnlyList labels, IReadOnlyList values)> MRange(
+ TimeStamp fromTimeStamp,
+ TimeStamp toTimeStamp,
+ IReadOnlyCollection