Struct raftstore::store::Peer
Fields
region_id: u64
The ID of the Region which this Peer belongs to.
tag: String
The peer tag, in the form “[region <region_id>] <peer_id>”.
peer: Peer
The Peer meta information.
raft_group: RawNode<PeerStorage<EK, ER>>
The Raft state machine of this Peer.
peer_cache: RefCell<HashMap<u64, Peer>>
The cache of meta information for Region’s other Peers.
peer_heartbeats: HashMap<u64, Instant>
Record the last instant of each peer’s heartbeat response.
proposals: ProposalQueue<EK::Snapshot>
leader_missing_time: Option<Instant>
leader_lease: Lease
pending_reads: ReadIndexQueue<EK::Snapshot>
leader_unreachable: bool
Set to true if the peer fails to send messages to the leader.
should_wake_up: bool
Indicates whether the peer should be woken up.
pending_remove: bool
Whether this peer is destroyed asynchronously. If it’s true,
- when merging, its data in storeMeta will be removed early by the target peer.
- all read requests must be rejected.
pending_messages: Vec<Message>
If a snapshot is being applied asynchronously, messages should not be sent.
peers_start_pending_time: Vec<(u64, Instant)>
Records the instants at which peers are added into the configuration. Entries are removed once the peers are no longer pending.
down_peer_ids: Vec<u64>
An inaccurate cache of which peers are marked as down.
size_diff_hint: u64
An inaccurate difference in region size since last reset. It is used to decide whether split check is needed.
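For illustration, a minimal sketch of how such a size hint can drive a split-check decision; the names SizeHint, split_check_due, and region_split_check_diff are assumptions for this sketch, not TiKV's API:

```rust
/// Hypothetical, simplified version of the split-check trigger: accumulate an
/// approximate size delta and schedule a split check once it exceeds a
/// configured threshold, then reset the hint.
struct SizeHint {
    size_diff_hint: u64,
}

impl SizeHint {
    /// `region_split_check_diff` mirrors the idea of a configured threshold.
    fn split_check_due(&mut self, written_bytes: u64, region_split_check_diff: u64) -> bool {
        self.size_diff_hint += written_bytes;
        if self.size_diff_hint >= region_split_check_diff {
            // A real implementation would schedule a split-check task here.
            self.size_diff_hint = 0;
            return true;
        }
        false
    }
}

fn main() {
    let mut hint = SizeHint { size_diff_hint: 0 };
    assert!(!hint.split_check_due(1 << 20, 8 << 20)); // 1 MiB written, below an 8 MiB threshold
    assert!(hint.split_check_due(10 << 20, 8 << 20)); // accumulated diff crosses the threshold
}
```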
delete_keys_hint: u64
The count of deleted keys since last reset.
compaction_declined_bytes: u64
An inaccurate difference in region size after compaction. It is used to trigger a split check, so that the approximate size and keys are updated after space reclamation of deleted entries.
approximate_size: u64
Approximate size of the region.
approximate_keys: u64
Approximate number of keys in the region.
has_calculated_region_size: bool
Whether the region size has been calculated by the split-check thread. If the region was just split, or a file that may overlap with existing data was ingested, approximate_size is not very accurate.
consistency_state: ConsistencyState
The state for consistency check.
pending_request_snapshot_count: Arc<AtomicUsize>
The counter records pending snapshot requests.
last_applying_idx: u64
The index of last scheduled committed raft log.
last_compacted_idx: u64
The index of last compacted raft log. It is used for the next compact log task.
last_urgent_proposal_idx: u64
The index of the latest urgent proposal.
last_committed_split_idx: u64
The index of the latest committed split command.
raft_log_size_hint: u64
Approximate size of logs that are applied but not yet compacted.
last_proposed_prepare_merge_idx: u64
The index of the latest proposed prepare merge command.
last_committed_prepare_merge_idx: u64
The index of the latest committed prepare merge command.
pending_merge_state: Option<MergeState>
The merge related state. It indicates this Peer is in merging.
want_rollback_merge_peers: HashSet<u64>
The peers that want to roll back the merge. A rollback merge proposal can be proposed only when the number of such peers is greater than the majority of all peers. There are more details in the annotation above test_node_merge_write_data_to_source_region_after_merging.
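For illustration, a standalone sketch of that majority condition (not the raftstore implementation; can_propose_rollback_merge is a hypothetical helper):

```rust
use std::collections::HashSet;

/// Simplified sketch: a rollback-merge proposal is allowed only once the peers
/// asking for a rollback form more than half of all peers.
fn can_propose_rollback_merge(want_rollback: &HashSet<u64>, all_peers: &[u64]) -> bool {
    want_rollback.len() > all_peers.len() / 2
}

fn main() {
    let all_peers = [1u64, 2, 3, 4, 5];
    let mut want_rollback: HashSet<u64> = [1u64, 2].iter().copied().collect();
    assert!(!can_propose_rollback_merge(&want_rollback, &all_peers)); // 2 of 5 is not enough
    want_rollback.insert(3);
    assert!(can_propose_rollback_merge(&want_rollback, &all_peers)); // 3 of 5 is a majority
}
```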
catch_up_logs: Option<CatchUpLogs>
Source region is catching up logs for merge.
peer_stat: PeerStat
Write statistics used by PD to schedule hot spots.
bcast_wake_up_time: Option<UtilInstant>
Time of the last attempt to wake up inactive leader.
replication_mode_version: u64
Current replication mode version.
dr_auto_sync_state: DrAutoSyncState
The required replication state at current version.
replication_sync: bool
A flag that caches sync state. It’s set to true when required replication state is reached for current region.
check_stale_conf_ver: u64
The newest known conf version and its corresponding peer list. Messages are sent to these peers to check whether this peer itself is stale.
check_stale_peers: Vec<Peer>
local_first_replicate: bool
Whether this peer was created by replication and is the first peer of this region on the local store.
txn_extra_op: Arc<AtomicCell<TxnExtraOp>>
max_ts_sync_status: Arc<AtomicU64>
The max timestamp recorded in the concurrency manager is only updated on the leader, so if a peer becomes leader from a follower, the max timestamp can be outdated. We need to update the max timestamp with the latest timestamp from PD before this peer can work. From the least significant bit to the most significant: 1 bit marks whether the timestamp is updated, 31 bits hold the current epoch version, and 32 bits hold the current term. The version and term are stored to prevent a stale UpdateMaxTimestamp task from marking the lowest bit.
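A minimal sketch of that bit layout; the pack/unpack helpers are illustrative, not part of TiKV:

```rust
/// Pack (term, version, updated) into a single u64 following the documented
/// layout: bit 0 = "updated" flag, bits 1..32 = epoch version (31 bits),
/// bits 32..64 = term (32 bits).
fn pack_sync_status(term: u32, version: u32, updated: bool) -> u64 {
    ((term as u64) << 32) | (((version as u64) & 0x7FFF_FFFF) << 1) | (updated as u64)
}

fn unpack_sync_status(status: u64) -> (u32, u32, bool) {
    let updated = status & 1 == 1;
    let version = ((status >> 1) & 0x7FFF_FFFF) as u32;
    let term = (status >> 32) as u32;
    (term, version, updated)
}

fn main() {
    let status = pack_sync_status(42, 7, false);
    assert_eq!(unpack_sync_status(status), (42, 7, false));
    // Marking the lowest bit records that the max timestamp has been updated for
    // this (version, term); a stale task carrying an older version/term would not
    // match and therefore must not set the flag.
    let marked = status | 1;
    assert_eq!(unpack_sync_status(marked), (42, 7, true));
}
```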
cmd_epoch_checker: CmdEpochChecker<EK::Snapshot>
Check whether this proposal can be proposed based on its epoch.
last_unpersisted_number: u64
The number of the last unpersisted ready.
read_progress: Arc<RegionReadProgress>
Implementations
impl<EK, ER> Peer<EK, ER> where
EK: KvEngine,
ER: RaftEngine,
pub fn new(
store_id: u64,
cfg: &Config,
sched: Scheduler<RegionTask<EK::Snapshot>>,
engines: Engines<EK, ER>,
region: &Region,
peer: Peer
) -> Result<Peer<EK, ER>>
pub fn init_replication_mode(&mut self, state: &mut GlobalReplicationState)
Sets commit group to the peer.
pub fn switch_replication_mode(&mut self, state: &Mutex<GlobalReplicationState>)
Updates replication mode.
pub fn activate<T>(&self, ctx: &PollContext<EK, ER, T>)
Register self to apply_scheduler so that the peer is then usable. Also trigger RegionChangeEvent::Create here.
fn next_proposal_index(&self) -> u64
pub fn get_index_term(&self, idx: u64) -> u64
pub fn maybe_append_merge_entries(
&mut self,
merge: &CommitMergeRequest
) -> Option<u64>
pub fn maybe_destroy<T>(
&mut self,
ctx: &PollContext<EK, ER, T>
) -> Option<DestroyPeerJob>
Tries to destroy itself. Returns a job (if needed) to do more cleaning tasks.
pub fn destroy<T>(
&mut self,
ctx: &PollContext<EK, ER, T>,
keep_data: bool
) -> Result<()>
Does the real destroy task which includes:
- Set the region to tombstone;
- Clear data;
- Notify all pending requests.
pub fn is_initialized(&self) -> bool
pub fn region(&self) -> &Region
pub fn check_before_tick(&self, cfg: &Config) -> CheckTickResult
Checks whether the peer can be hibernated. This should be used together with check_after_tick to get a correct conclusion.
pub fn check_after_tick(&self, state: GroupState, res: CheckTickResult) -> bool
pub fn has_valid_leader(&self) -> bool
pub fn ping(&mut self)
Pings to check whether followers are still connected. The leader needs to know the exact progress of its followers, while followers just need to know whether the leader is still alive.
pub fn has_uncommitted_log(&self) -> bool
pub fn set_region(
&mut self,
host: &CoprocessorHost<impl KvEngine>,
reader: &mut ReadDelegate,
region: Region
)
Sets the region of a peer. This will update the region of the peer; the caller must ensure the region has been preserved on a durable device.
pub fn peer_id(&self) -> u64
pub fn leader_id(&self) -> u64
pub fn is_leader(&self) -> bool
pub fn get_role(&self) -> StateRole
pub fn get_store(&self) -> &PeerStorage<EK, ER>
pub fn mut_store(&mut self) -> &mut PeerStorage<EK, ER>
pub fn is_applying_snapshot(&self) -> bool
pub fn has_pending_snapshot(&self) -> bool
Returns true if the raft group has replicated a snapshot but not committed it yet.
pub fn get_pending_snapshot(&self) -> Option<&Snapshot>
fn add_ready_metric(&self, ready: &Ready, metrics: &mut RaftReadyMetrics)
fn add_light_ready_metric(
&self,
light_ready: &LightReady,
metrics: &mut RaftReadyMetrics
)
pub fn in_joint_state(&self) -> bool
fn send<T, I>(
&mut self,
trans: &mut T,
msgs: I,
metrics: &mut RaftSendMessageMetrics
) where
T: Transport,
I: IntoIterator<Item = Message>,
pub fn step<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
m: Message
) -> Result<()>
Steps the raft message.
pub fn check_peers(&mut self)
Checks and updates peer_heartbeats for the peer.
pub fn collect_down_peers(&mut self, max_duration: Duration) -> Vec<PeerStats>
Collects all down peers.
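For illustration, a simplified standalone sketch of the heartbeat bookkeeping behind down-peer collection; collect_down_peer_ids is a hypothetical stand-in, not the raftstore method:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Simplified stand-in for `peer_heartbeats`: given the last instant each peer
/// responded, return the peers that have been silent for at least `max_duration`.
fn collect_down_peer_ids(
    peer_heartbeats: &HashMap<u64, Instant>,
    now: Instant,
    max_duration: Duration,
) -> Vec<u64> {
    peer_heartbeats
        .iter()
        .filter(|&(_, last)| now.saturating_duration_since(*last) >= max_duration)
        .map(|(&id, _)| id)
        .collect()
}

fn main() {
    let start = Instant::now();
    let mut peer_heartbeats = HashMap::new();
    peer_heartbeats.insert(2u64, start + Duration::from_secs(580)); // responded recently
    peer_heartbeats.insert(3u64, start);                            // silent for a long time
    // Evaluate at a simulated "now", ten minutes after `start`.
    let now = start + Duration::from_secs(600);
    let down = collect_down_peer_ids(&peer_heartbeats, now, Duration::from_secs(300));
    assert_eq!(down, vec![3u64]); // with a 5-minute threshold, only peer 3 is down
}
```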
pub fn collect_pending_peers<T>(
&mut self,
ctx: &PollContext<EK, ER, T>
) -> Vec<Peer>
Collects all pending peers and updates peers_start_pending_time.
pub fn any_new_peer_catch_up(&mut self, peer_id: u64) -> bool
Returns true if any peer recovers from a connectivity problem. A peer can become pending or down if it has not responded for a long time. If it becomes normal again, PD needs to be notified.
pub fn check_stale_state<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>
) -> StaleState
fn on_role_changed<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
ready: &Ready
)
fn on_leader_commit_idx_changed(
&mut self,
pre_commit_index: u64,
commit_index: u64
)
Correctness depends on the order between calling this function and notifying other peers of the new commit index. This is due to the interaction between lease and split/merge (details are described below).
Note that in addition to the heartbeat/append msg, the read index response can also notify other peers of the new commit index. There are three places where TiKV handles read index requests. The first place is in raft-rs, so it's like the heartbeat/append msg: call this function and then send the response. The second place is in Step, where we should use the commit index of PeerStorage, which is the greatest commit index that can be observed outside. The third place is in read_index; handle it like the second one.
fn on_leader_changed<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
leader_id: u64,
term: u64
)
pub fn ready_to_handle_pending_snap(&self) -> bool
fn ready_to_handle_read(&self) -> bool
fn ready_to_handle_unsafe_replica_read(&self, read_index: u64) -> bool
fn is_splitting(&self) -> bool
fn is_merging(&self) -> bool
fn is_merging_strict(&self) -> bool
pub fn ready_to_handle_request_snapshot(&mut self, request_index: u64) -> bool
pub fn replication_mode_need_catch_up(&self) -> bool
Checks whether the leader needs to keep sending logs to a follower. In DrAutoSync mode, if the leader goes to sleep before the region is in sync, PD may need to wait longer for the region to reach the sync state.
pub fn handle_raft_ready_append<T: Transport>(
&mut self,
ctx: &mut PollContext<EK, ER, T>
) -> Option<CollectedReady>
pub fn post_raft_ready_append<T: Transport>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
invoke_ctx: InvokeContext,
ready: &mut Ready
) -> Option<ApplySnapResult>
pub fn handle_raft_committed_entries<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
committed_entries: Vec<Entry>
)
pub fn handle_raft_ready_advance<T: Transport>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
ready: Ready
)
fn response_read<T>(
&self,
read: &mut ReadIndexRequest<EK::Snapshot>,
ctx: &mut PollContext<EK, ER, T>,
replica_read: bool
)
fn post_pending_read_index_on_replica<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>
)
Responds to the ready read index request on the replica; the replica is not a leader.
fn send_read_command<T>(
&self,
ctx: &mut PollContext<EK, ER, T>,
read_cmd: RaftCommand<EK::Snapshot>
)
fn apply_reads<T>(&mut self, ctx: &mut PollContext<EK, ER, T>, ready: &Ready)
pub fn post_apply<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
apply_state: RaftApplyState,
applied_index_term: u64,
apply_metrics: &ApplyMetrics
) -> bool
pub fn post_split(&mut self)
fn maybe_renew_leader_lease<T>(
&mut self,
ts: Timespec,
ctx: &mut PollContext<EK, ER, T>,
progress: Option<ReadProgress>
)
Tries to renew the leader lease.
fn maybe_update_read_progress(
&self,
reader: &mut ReadDelegate,
progress: ReadProgress
)
pub fn maybe_campaign(&mut self, parent_is_leader: bool) -> bool
pub fn propose<T: Transport>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
cb: Callback<EK::Snapshot>,
req: RaftCmdRequest,
err_resp: RaftCmdResponse
) -> bool
Proposes a request. Returning true means the request has been proposed successfully.
fn post_propose<T>(
&mut self,
poll_ctx: &mut PollContext<EK, ER, T>,
p: Proposal<EK::Snapshot>
)
fn check_conf_change<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
change_peers: &[ChangePeerRequest],
cc: &impl ConfChangeI
) -> Result<()>
Validates the ConfChange requests and checks whether it's safe to propose these conf change requests.
It's safe iff at least a quorum of the Raft group is still healthy right after all the conf changes are applied.
If allow_remove_leader is false, then the peer to be removed should not be the leader.
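For illustration, a minimal sketch of that quorum-safety rule, ignoring joint consensus and learners; conf_change_is_safe is a hypothetical helper, not the actual check:

```rust
/// Simplified sketch: after the conf change is applied, the voters that are
/// still healthy must still form a quorum, otherwise the change is unsafe.
fn conf_change_is_safe(voters_after_change: &[u64], healthy_peers: &[u64]) -> bool {
    let healthy_voters = voters_after_change
        .iter()
        .filter(|id| healthy_peers.contains(*id))
        .count();
    let quorum = voters_after_change.len() / 2 + 1;
    healthy_voters >= quorum
}

fn main() {
    // Removing peer 3 from {1, 2, 3} leaves voters {1, 2}, both healthy: safe.
    assert!(conf_change_is_safe(&[1, 2], &[1, 2]));
    // Removing peer 2 while peer 3 is unreachable leaves voters {1, 3} with only
    // one healthy member, short of the quorum of 2: unsafe.
    assert!(!conf_change_is_safe(&[1, 3], &[1, 2]));
}
```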
fn check_joint_state(
&mut self,
cc: &impl ConfChangeI
) -> Result<ProgressTracker>
Checks if the current joint state can handle this conf change.
fn transfer_leader(&mut self, peer: &Peer)
fn pre_transfer_leader(&mut self, peer: &Peer) -> bool
fn ready_to_transfer_leader<T>(
&self,
ctx: &mut PollContext<EK, ER, T>,
index: u64,
peer: &Peer
) -> Option<&'static str>
fn read_local<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
req: RaftCmdRequest,
cb: Callback<EK::Snapshot>
)
fn pre_read_index(&self) -> Result<()>
pub fn has_unresolved_reads(&self) -> bool
pub fn retry_pending_reads(&mut self, cfg: &Config)
ReadIndex requests could be lost in the network, so commands on followers could queue in pending_reads forever. Sending a new ReadIndex periodically can resolve this.
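For illustration, a standalone sketch of such a periodic retry policy; PendingRead and should_retry are assumptions for this sketch, not raftstore types:

```rust
use std::time::{Duration, Instant};

/// Stand-in for a queued read waiting for a ReadIndex response.
struct PendingRead {
    propose_time: Instant,
    retried: bool,
}

/// Decide whether a follower should re-send a ReadIndex for a queued read:
/// the original request may have been lost, so retry once the configured
/// interval has elapsed without a response.
fn should_retry(read: &PendingRead, now: Instant, retry_interval: Duration) -> bool {
    !read.retried && now.saturating_duration_since(read.propose_time) >= retry_interval
}

fn main() {
    let proposed_at = Instant::now();
    let read = PendingRead { propose_time: proposed_at, retried: false };
    let retry_interval = Duration::from_secs(5);
    assert!(!should_retry(&read, proposed_at + Duration::from_secs(1), retry_interval));
    assert!(should_retry(&read, proposed_at + Duration::from_secs(10), retry_interval));
}
```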
fn read_index<T: Transport>(
&mut self,
poll_ctx: &mut PollContext<EK, ER, T>,
req: RaftCmdRequest,
err_resp: RaftCmdResponse,
cb: Callback<EK::Snapshot>
) -> bool
pub fn get_min_progress(&self) -> Result<(u64, u64)>
Returns (minimal matched, minimal committed_index). For now, it is only used in merge.
fn pre_propose_prepare_merge<T>(
&self,
ctx: &mut PollContext<EK, ER, T>,
req: &mut RaftCmdRequest
) -> Result<()>
fn pre_propose<T>(
&self,
poll_ctx: &mut PollContext<EK, ER, T>,
req: &mut RaftCmdRequest
) -> Result<ProposalContext>
fn propose_normal<T>(
&mut self,
poll_ctx: &mut PollContext<EK, ER, T>,
req: RaftCmdRequest
) -> Result<Either<u64, u64>>
Proposes a normal request to raft.
Ok(Either::Left(index)) means the proposal is proposed successfully and is located at the index position. Ok(Either::Right(index)) means the proposal is rejected by CmdEpochChecker, and the index is the position of the last conflicting admin cmd.
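For illustration, how a caller might interpret that return convention, using the either crate and a stand-in String error type; the surrounding code is hypothetical:

```rust
use either::Either;

/// Illustrative handler for the documented return convention:
/// Left(index) = proposed at `index`; Right(index) = rejected by the epoch
/// checker, where `index` is the last conflicting admin command.
fn on_propose_result(res: Result<Either<u64, u64>, String>) {
    match res {
        Ok(Either::Left(index)) => {
            println!("proposal accepted at raft log index {}", index);
        }
        Ok(Either::Right(conflict_index)) => {
            println!(
                "proposal rejected; wait until the admin command at index {} is applied",
                conflict_index
            );
        }
        Err(e) => println!("propose failed: {}", e),
    }
}

fn main() {
    on_propose_result(Ok(Either::Left(128)));
    on_propose_result(Ok(Either::Right(100)));
}
```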
fn execute_transfer_leader<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
msg: &Message
)
fn propose_transfer_leader<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
req: RaftCmdRequest,
cb: Callback<EK::Snapshot>
) -> bool
Returns true if the transfer leader request is accepted.
When transferring leadership begins, the leader first sends a pre-transfer to the target follower to ensure it's ready to become leader. After that the real transfer leader process begins (sketched below):
- pre_transfer_leader on leader: the leader sends a MsgTransferLeader to the follower.
- execute_transfer_leader on follower: if the follower passes all necessary checks, it replies an ACK with type MsgTransferLeader and its promised persistent index.
- execute_transfer_leader on leader: the leader checks whether it's appropriate to transfer leadership. If so, it calls the raft transfer_leader API to do the remaining work.
See also: tikv/rfcs#37.
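A highly simplified sketch of that handshake, with hypothetical message types and an illustrative readiness check standing in for the real raft messages and logic:

```rust
/// Hypothetical stand-ins for the messages exchanged during leader transfer.
enum Msg {
    /// Leader -> follower: probe whether the target is ready (pre_transfer_leader).
    PreTransfer { to: u64 },
    /// Follower -> leader: ACK carrying the follower's promised persisted index.
    TransferAck { persisted_index: u64 },
}

/// On the leader, decide whether to hand off leadership; here the illustrative
/// condition is that the target has persisted at least the leader's committed index.
fn leader_may_transfer(ack: &Msg, leader_committed: u64) -> bool {
    match ack {
        Msg::TransferAck { persisted_index } => *persisted_index >= leader_committed,
        Msg::PreTransfer { .. } => false,
    }
}

fn main() {
    // Step 1: the leader sends a pre-transfer probe to the target follower.
    if let Msg::PreTransfer { to } = (Msg::PreTransfer { to: 2 }) {
        println!("leader probes follower {} before transferring leadership", to);
    }
    // Step 2: the follower replies with an ACK and its promised persisted index.
    let ack = Msg::TransferAck { persisted_index: 120 };
    // Step 3: the leader only calls the raft transfer_leader API once the target
    // has caught up far enough.
    assert!(leader_may_transfer(&ack, 120));
    assert!(!leader_may_transfer(&ack, 130));
}
```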
fn propose_conf_change<T>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
req: &RaftCmdRequest
) -> Result<Either<u64, u64>>
Ok(Either::Left(index)) means the proposal is proposed successfully and is located at the index position. Ok(Either::Right(index)) means the proposal is rejected by CmdEpochChecker, and the index is the position of the last conflicting admin cmd.
fn propose_conf_change_internal<T, CP: ChangePeerI>(
&mut self,
ctx: &mut PollContext<EK, ER, T>,
change_peer: CP,
data: Vec<u8>
) -> Result<u64>
fn handle_read<T>(
&self,
ctx: &mut PollContext<EK, ER, T>,
req: RaftCmdRequest,
check_epoch: bool,
read_index: Option<u64>
) -> ReadResponse<EK::Snapshot>
pub fn term(&self) -> u64
pub fn stop(&mut self)
pub fn maybe_add_want_rollback_merge_peer(
&mut self,
peer_id: u64,
extra_msg: &ExtraMessage
)
pub fn add_want_rollback_merge_peer(&mut self, peer_id: u64)
impl<EK, ER> Peer<EK, ER> where
EK: KvEngine,
ER: RaftEngine,
pub fn insert_peer_cache(&mut self, peer: Peer)
pub fn remove_peer_from_cache(&mut self, peer_id: u64)
pub fn get_peer_from_cache(&self, peer_id: u64) -> Option<Peer>
fn region_replication_status(&mut self) -> Option<RegionReplicationStatus>
pub fn heartbeat_pd<T>(&mut self, ctx: &PollContext<EK, ER, T>)
fn prepare_raft_message(&self) -> RaftMessage
pub fn send_extra_message<T: Transport>(
&self,
msg: ExtraMessage,
trans: &mut T,
to: &Peer
)
fn send_raft_message<T: Transport>(
&mut self,
msg: Message,
trans: &mut T
) -> bool
pub fn bcast_wake_up_message<T: Transport>(
&self,
ctx: &mut PollContext<EK, ER, T>
)
pub fn send_wake_up_message<T: Transport>(
&self,
ctx: &mut PollContext<EK, ER, T>,
peer: &Peer
)
pub fn bcast_check_stale_peer_message<T: Transport>(
&mut self,
ctx: &mut PollContext<EK, ER, T>
)
pub fn on_check_stale_peer_response(
&mut self,
check_conf_ver: u64,
check_peers: Vec<Peer>
)
pub fn send_want_rollback_merge<T: Transport>(
&self,
premerge_commit: u64,
ctx: &mut PollContext<EK, ER, T>
)
pub fn require_updating_max_ts(
&self,
pd_scheduler: &FutureScheduler<PdTask<EK>>
)
Trait Implementations
impl<EK, ER> RequestInspector for Peer<EK, ER> where
EK: KvEngine,
ER: RaftEngine,
fn has_applied_to_current_term(&mut self) -> bool
fn inspect_lease(&mut self) -> LeaseState
fn inspect(&mut self, req: &RaftCmdRequest) -> Result<RequestPolicy>
Auto Trait Implementations
impl<EK, ER> !RefUnwindSafe for Peer<EK, ER>
impl<EK, ER> Send for Peer<EK, ER>
impl<EK, ER> !Sync for Peer<EK, ER>
impl<EK, ER> Unpin for Peer<EK, ER> where
ER: Unpin,
impl<EK, ER> !UnwindSafe for Peer<EK, ER>
Blanket Implementations
impl<T> Any for T where
T: 'static + ?Sized,
impl<T> Borrow<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
pub fn borrow_mut(&mut self) -> &mut T
impl<V, W> ConvertFrom<W> for V where
W: ConvertTo<V>,
pub fn convert_from(ctx: &mut EvalContext, from: W) -> Result<V, Error>
impl<T> From<T> for T
impl<T> Instrument for T
pub fn instrument(self, span: Span) -> Instrumented<Self>
pub fn in_current_span(self) -> Instrumented<Self>
impl<T, U> Into<U> for T where
U: From<T>,
impl<T> Pointable for T
pub const ALIGN: usize
type Init = T
The type for initializers.
pub unsafe fn init(init: <T as Pointable>::Init) -> usize
pub unsafe fn deref<'a>(ptr: usize) -> &'a T
pub unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T
pub unsafe fn drop(ptr: usize)
impl<T> Same<T> for T
type Output = T
Should always be Self
impl<T> Sealed<T> for T where
T: ?Sized,
impl<T, U> TryFrom<U> for T where
U: Into<T>,
type Error = Infallible
The type returned in the event of a conversion error.
pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
impl<T, U> TryInto<U> for T where
U: TryFrom<T>,
type Error = <U as TryFrom<T>>::Error
The type returned in the event of a conversion error.
pub fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
impl<V, T> VZip<V> for T where
V: MultiLane<T>,