Improve Comment Readability (#69)

Signed-off-by: Ryan Russell <git@ryanrussell.org>
Authored by Ryan Russell on 2022-05-31 15:07:00 -05:00, committed by GitHub
parent f641ba83b9
commit eae5c08e76
8 changed files with 9 additions and 9 deletions

View File

@@ -12,7 +12,7 @@ namespace dfly {
// DASH: Dynamic And Scalable Hashing.
// TODO: We could name it DACHE: Dynamic and Adaptive caCHE.
-// After all, we added additionaly improvements we added as part of the dragonfly project,
+// After all, we added additionally improvements we added as part of the dragonfly project,
// that probably justify a right to choose our own name for this data structure.
struct BasicDashPolicy {
enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };
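
To make the policy idea concrete, here is a minimal sketch of a caller-defined policy; it mirrors only the constants visible in this hunk, and any further members a complete policy must provide (destruction hooks, versioning flags, and so on) are omitted because they are not shown in this diff.

// Illustrative values only; same shape as BasicDashPolicy above.
struct DenseCachePolicy {
  enum { kSlotNum = 12, kBucketNum = 56, kStashBucketNum = 4 };
};
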
@@ -225,7 +225,7 @@ class DashTable : public detail::DashTableBase {
// In practice traversal is limited to a single segment. The operation is read-only and
// simulates insertion process. 'cb' must accept bucket_iterator.
// Note: the interface a bit hacky.
-// The functions call cb on physical buckets with version smalller than ver_threshold that
+// The functions call cb on physical buckets with version smaller than ver_threshold that
// due to entry movements might update its version to version greater than ver_threshold.
//
// These are not const functions because they send non-const iterators that allow
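
As a rough illustration of the callback contract described above (the surrounding variables and the consumer are assumptions, not part of this diff), a caller might pass something like:

// 'cb' receives a bucket_iterator for each physical bucket whose version is still
// below ver_threshold; a real insertion performed later may push it above the threshold.
auto cb = [&](auto bucket_it) {
  serializer.SerializeBucket(db_index, bucket_it);  // hypothetical consumer of the bucket
};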

View File

@@ -555,7 +555,7 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
/*both clear this bucket and its neighbor bucket*/
void RemoveStashReference(unsigned stash_pos, Hash_t key_hash);
-// Returns slot id if insertion is succesful, -1 if no free slots are found.
+// Returns slot id if insertion is successful, -1 if no free slots are found.
template <typename U, typename V>
int TryInsertToBucket(unsigned bidx, U&& key, V&& value, uint8_t meta_hash, bool probe) {
auto& b = bucket_[bidx];
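
A brief sketch of how a caller is expected to treat the documented return value (the calling context here is assumed for illustration):

int slot = TryInsertToBucket(bidx, std::move(key), std::move(value), meta_hash,
                             /*probe=*/false);
if (slot < 0) {
  // No free slot in bucket 'bidx': the caller would typically try a neighbor or
  // stash bucket next, or signal that the segment needs to be split.
}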

View File

@@ -20,7 +20,7 @@ class ExpirePeriod {
Set(ms);
}
-// alwaws returns milliseconds value.
+// always returns milliseconds value.
uint64_t duration_ms() const {
return precision_ ? uint64_t(val_) * 1000 : val_;
}
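
A short worked example of the conversion above (the constructor shape is inferred from the Set(ms) call and is otherwise an assumption):

ExpirePeriod p(7'000);  // assumed ctor; stores 7000 with ms precision, or 7 with second precision
// Either representation yields the same answer:
//   millisecond precision: val_ == 7000 -> duration_ms() == 7000
//   second precision:      val_ == 7    -> duration_ms() == 7 * 1000 == 7000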

View File

@@ -28,7 +28,7 @@ constexpr inline unsigned long long operator""_KB(unsigned long long x) {
* as a state machine that either returns an offset to the backign storage or the indication
* of the resource that is missing. The advantage of such design is that we can use it in
* asynchronous callbacks without blocking on any IO requests.
-* The allocator uses dynanic memory internally. Should be used in a single thread.
+* The allocator uses dynamic memory internally. Should be used in a single thread.
*
*/
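
The non-blocking contract described in this comment can be pictured with a small, self-contained sketch; the type names below are illustrative assumptions, not the allocator's actual API:

#include <cstdint>
#include <variant>

struct Offset { uint64_t pos; };           // where in the backing storage the data lives
struct Missing { unsigned resource_id; };  // which resource must be provided before retrying

using AllocResult = std::variant<Offset, Missing>;

// A caller inspects the result instead of blocking: on Missing it schedules the IO
// (for example, growing the backing file) asynchronously and re-drives the request later.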

View File

@@ -93,7 +93,7 @@ class Interpreter {
// We have interpreter per thread, not per connection.
// Since we might preempt into different fibers when operating on interpreter
// we must lock it until we finish using it per request.
-// Only RunFunction with companions require locking since other functions peform atomically
+// Only RunFunction with companions require locking since other functions perform atomically
// without preemptions.
std::lock_guard<::boost::fibers::mutex> Lock() {
return std::lock_guard<::boost::fibers::mutex>{mu_};
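
A sketch of the usage pattern this comment implies (the call site and RunFunction arguments are assumptions):

auto guard = interpreter->Lock();            // lock_guard is returned via guaranteed copy elision (C++17)
interpreter->RunFunction(/* body, args */);  // may preempt into other fibers while the mutex is held
// Other Interpreter methods run atomically with respect to preemption and do not need the guard.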

View File

@@ -10,7 +10,7 @@
namespace dfly {
// blob strings of upto ~64KB. Small sizes are probably predominant
-// for in-mmeory workloads, especially for keys.
+// for in-memory workloads, especially for keys.
// Please note that this class does not have automatic constructors and destructors, therefore
// it requires explicit management.
class SmallString {
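
Because the class provides no automatic constructors or destructors, callers own the lifecycle explicitly. The sketch below shows the general shape; every method name in it is an assumption made for illustration.

SmallString s;               // nothing is initialized automatically
s.Assign("user:1000:name");  // hypothetical: stores a blob of up to ~64KB
// ... use s ...
s.Free();                    // hypothetical: must be released explicitly, no destructor will run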

View File

@@ -86,7 +86,7 @@ class DbSlice {
Stats GetStats() const;
//! UpdateExpireClock updates the expire clock for this db slice.
-//! Must be a wall clock so we could replicate it betweeen machines.
+//! Must be a wall clock so we could replicate it between machines.
void UpdateExpireClock(uint64_t now_ms) {
now_ms_ = now_ms;
}
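
A sketch of the intended call pattern (the time source is an assumption; the point is that now_ms must come from a wall clock so primaries and replicas agree):

uint64_t now_ms = absl::GetCurrentTimeNanos() / 1'000'000;  // wall clock, not a steady/monotonic clock
db_slice.UpdateExpireClock(now_ms);                         // the same value can be shipped to replicas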

View File

@@ -435,7 +435,7 @@ OpStatus Mover::OpFind(Transaction* t, EngineShard* es) {
for (auto k : largs) {
unsigned index = (k == src_) ? 0 : 1;
OpResult<PrimeIterator> res = es->db_slice().Find(t->db_index(), k, OBJ_SET);
-if (res && index == 0) { // succesful src find.
+if (res && index == 0) { // successful src find.
DCHECK(!res->is_done());
const CompactObj& val = res.value()->second;
SetType st{val.RObjPtr(), val.Encoding()};