use std::cmp;
use std::ops::Neg;
use anyhow::{anyhow, Error};
use cid::multihash::Code;
use cid::Cid;
use fil_actors_shared::actor_error_v12;
use fil_actors_shared::v12::runtime::Policy;
use fil_actors_shared::v12::{
make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorDowncast, ActorError, Array,
};
use fvm_ipld_amt::Error as AmtError;
use fvm_ipld_bitfield::BitField;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::*;
use fvm_ipld_encoding::{strict_bytes, BytesDe, CborStore};
use fvm_ipld_hamt::Error as HamtError;
use fvm_shared4::address::Address;
use fvm_shared4::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED};
use fvm_shared4::econ::TokenAmount;
use fvm_shared4::error::ExitCode;
use fvm_shared4::sector::{RegisteredPoStProof, SectorNumber, SectorSize, MAX_SECTOR_NUMBER};
use fvm_shared4::{ActorID, HAMT_BIT_WIDTH};
use itertools::Itertools;
use num_traits::Zero;
use super::beneficiary::*;
use super::deadlines::new_deadline_info;
use super::policy::*;
use super::types::*;
use super::{
assign_deadlines, deadline_is_mutable, new_deadline_info_from_offset_and_epoch,
quant_spec_for_deadline, BitFieldQueue, Deadline, DeadlineInfo, DeadlineSectorMap, Deadlines,
PowerPair, Sectors, TerminationResult, VestingFunds,
};
// Bit-width of the AMT backing the precommit-expiry cleanup queue.
const PRECOMMIT_EXPIRY_AMT_BITWIDTH: u32 = 6;
// Bit-width of the AMT holding on-chain sector infos.
pub const SECTORS_AMT_BITWIDTH: u32 = 5;
/// Balance of miner actor should equal sum of `pre_commit_deposits`,
/// `locked_funds` and `initial_pledge` (checked by `check_balance_invariants`).
/// Field order is consensus-relevant: `Serialize_tuple` encodes by position.
#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)]
pub struct State {
    /// CID of the stored `MinerInfo` record.
    pub info: Cid,
    /// Total funds locked as pre-commit deposits.
    pub pre_commit_deposits: TokenAmount,
    /// Total unvested funds locked under vesting schedules.
    pub locked_funds: TokenAmount,
    /// CID of the stored `VestingFunds` table.
    pub vesting_funds: Cid,
    /// Outstanding fee debt owed by this miner.
    pub fee_debt: TokenAmount,
    /// Sum of initial pledge requirements of active sectors.
    pub initial_pledge: TokenAmount,
    /// HAMT of pending sector pre-commitments, keyed by sector number.
    pub pre_committed_sectors: Cid,
    /// AMT queue of precommit expirations awaiting cleanup.
    pub pre_committed_sectors_cleanup: Cid,
    /// Bitfield of sector numbers ever allocated (never reused).
    pub allocated_sectors: Cid,
    /// AMT of `SectorOnChainInfo`, keyed by sector number.
    pub sectors: Cid,
    /// Epoch at which the current proving period started.
    pub proving_period_start: ChainEpoch,
    /// Index of the deadline currently (or last) processed.
    pub current_deadline: u64,
    /// CID of the stored `Deadlines` structure.
    pub deadlines: Cid,
    /// Deadline indices with pending early terminations to process.
    pub early_terminations: BitField,
    /// Whether the per-deadline cron is currently scheduled.
    pub deadline_cron_active: bool,
}
/// Controls whether `allocate_sector_numbers` tolerates sector numbers that
/// collide with previously allocated ones.
#[derive(PartialEq, Eq)]
pub enum CollisionPolicy {
// Overlap with already-allocated numbers is permitted.
AllowCollisions,
// Any overlap with already-allocated numbers is an error.
DenyCollisions,
}
impl State {
/// Initializes a fresh miner `State`, creating the miner's empty on-chain
/// collections in `store` (precommit map, precommit-cleanup queue, sectors
/// AMT, allocation bitfield, deadlines, vesting funds) and recording their
/// CIDs. `info_cid` must reference an already-stored `MinerInfo`;
/// `period_start`/`deadline_idx` seed the proving schedule.
#[allow(clippy::too_many_arguments)]
pub fn new<BS: Blockstore>(
    policy: &Policy,
    store: &BS,
    info_cid: Cid,
    period_start: ChainEpoch,
    deadline_idx: u64,
) -> anyhow::Result<Self> {
    // Empty HAMT for sector pre-commitments.
    let empty_precommit_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH)
        .flush()
        .map_err(|e| {
            e.downcast_default(
                ExitCode::USR_ILLEGAL_STATE,
                "failed to construct empty precommit map",
            )
        })?;
    // Empty AMT queueing precommit expirations for cleanup.
    let empty_precommits_cleanup_array =
        Array::<BitField, BS>::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH)
            .flush()
            .map_err(|e| {
                e.downcast_default(
                    ExitCode::USR_ILLEGAL_STATE,
                    "failed to construct empty precommits array",
                )
            })?;
    // Empty AMT of proven sectors.
    let empty_sectors_array =
        Array::<SectorOnChainInfo, BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH)
            .flush()
            .map_err(|e| {
                e.downcast_default(
                    ExitCode::USR_ILLEGAL_STATE,
                    "failed to construct sectors array",
                )
            })?;
    // Empty bitfield of allocated sector numbers.
    let empty_bitfield = store
        .put_cbor(&BitField::new(), Code::Blake2b256)
        .map_err(|e| {
            e.downcast_default(
                ExitCode::USR_ILLEGAL_STATE,
                "failed to construct empty bitfield",
            )
        })?;
    let deadline = Deadline::new(store)?;
    // The three messages below previously read "failed to construct illegal
    // state" (copy-paste); they now describe the operation that failed.
    let empty_deadline = store.put_cbor(&deadline, Code::Blake2b256).map_err(|e| {
        e.downcast_default(
            ExitCode::USR_ILLEGAL_STATE,
            "failed to construct empty deadline",
        )
    })?;
    let empty_deadlines = store
        .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256)
        .map_err(|e| {
            e.downcast_default(
                ExitCode::USR_ILLEGAL_STATE,
                "failed to construct empty deadlines",
            )
        })?;
    let empty_vesting_funds_cid = store
        .put_cbor(&VestingFunds::new(), Code::Blake2b256)
        .map_err(|e| {
            e.downcast_default(
                ExitCode::USR_ILLEGAL_STATE,
                "failed to construct empty vesting funds",
            )
        })?;
    Ok(Self {
        info: info_cid,
        pre_commit_deposits: TokenAmount::default(),
        locked_funds: TokenAmount::default(),
        vesting_funds: empty_vesting_funds_cid,
        initial_pledge: TokenAmount::default(),
        fee_debt: TokenAmount::default(),
        pre_committed_sectors: empty_precommit_map,
        allocated_sectors: empty_bitfield,
        sectors: empty_sectors_array,
        proving_period_start: period_start,
        current_deadline: deadline_idx,
        deadlines: empty_deadlines,
        early_terminations: BitField::new(),
        deadline_cron_active: false,
        pre_committed_sectors_cleanup: empty_precommits_cleanup_array,
    })
}
/// Loads the `MinerInfo` referenced by `self.info` from the store.
/// Errors if the CID is absent or the load fails.
pub fn get_info<BS: Blockstore>(&self, store: &BS) -> anyhow::Result<MinerInfo> {
    let lookup = store
        .get_cbor(&self.info)
        .map_err(|e| e.downcast_wrap("failed to get miner info"))?;
    lookup.ok_or_else(|| actor_error_v12!(not_found, "failed to get miner info").into())
}
/// Serializes `info` into the store and records the resulting CID.
pub fn save_info<BS: Blockstore>(
    &mut self,
    store: &BS,
    info: &MinerInfo,
) -> anyhow::Result<()> {
    self.info = store.put_cbor(&info, Code::Blake2b256)?;
    Ok(())
}
/// Returns deadline calculations for the proving period containing
/// `current_epoch`, derived from the recorded period start offset.
pub fn deadline_info(&self, policy: &Policy, current_epoch: ChainEpoch) -> DeadlineInfo {
new_deadline_info_from_offset_and_epoch(policy, self.proving_period_start, current_epoch)
}
/// Returns deadline calculations for the deadline index recorded in state
/// (`current_deadline`), as opposed to recomputing it from `current_epoch`.
pub fn recorded_deadline_info(
&self,
policy: &Policy,
current_epoch: ChainEpoch,
) -> DeadlineInfo {
new_deadline_info(
policy,
self.proving_period_start,
self.current_deadline,
current_epoch,
)
}
/// Start epoch of the proving period that contains `current_epoch`.
pub fn current_proving_period_start(
    &self,
    policy: &Policy,
    current_epoch: ChainEpoch,
) -> ChainEpoch {
    self.deadline_info(policy, current_epoch).period_start
}
/// Quantization spec for expirations in the given deadline.
/// The epoch argument is passed as 0 — assumed irrelevant to the quant
/// spec (which depends on period start and deadline index); TODO confirm
/// against `new_deadline_info`.
pub fn quant_spec_for_deadline(&self, policy: &Policy, deadline_idx: u64) -> QuantSpec {
new_deadline_info(policy, self.proving_period_start, deadline_idx, 0).quant_spec()
}
/// Marks the given sector numbers as allocated in the on-chain allocation
/// bitfield. With `DenyCollisions`, any overlap with previously allocated
/// numbers is rejected as an illegal argument.
pub fn allocate_sector_numbers<BS: Blockstore>(
&mut self,
store: &BS,
sector_numbers: &BitField,
policy: CollisionPolicy,
) -> Result<(), ActorError> {
let prior_allocation = store
.get_cbor(&self.allocated_sectors)
.map_err(|e| {
e.downcast_default(
ExitCode::USR_ILLEGAL_STATE,
"failed to load allocated sectors bitfield",
)
})?
.ok_or_else(|| {
actor_error_v12!(illegal_state, "allocated sectors bitfield not found")
})?;
if policy != CollisionPolicy::AllowCollisions {
// Intersection of requested and already-allocated numbers.
let collisions = &prior_allocation & sector_numbers;
if !collisions.is_empty() {
return Err(actor_error_v12!(
illegal_argument,
"sector numbers {:?} already allocated",
collisions
));
}
}
let new_allocation = &prior_allocation | sector_numbers;
// NOTE(review): a store failure here surfaces as USR_ILLEGAL_ARGUMENT,
// while the load above uses USR_ILLEGAL_STATE — confirm this asymmetry
// is intentional before changing (exit codes are caller-visible).
self.allocated_sectors =
store
.put_cbor(&new_allocation, Code::Blake2b256)
.map_err(|e| {
e.downcast_default(
ExitCode::USR_ILLEGAL_ARGUMENT,
format!(
"failed to store allocated sectors bitfield after adding {:?}",
sector_numbers,
),
)
})?;
Ok(())
}
/// Stores the given pre-commitments in the precommit HAMT, keyed by sector
/// number. Fails if any sector number already has a pending precommitment.
pub fn put_precommitted_sectors<BS: Blockstore>(
&mut self,
store: &BS,
precommits: Vec<SectorPreCommitOnChainInfo>,
) -> anyhow::Result<()> {
let mut precommitted =
make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?;
for precommit in precommits.into_iter() {
let sector_no = precommit.info.sector_number;
// `set_if_absent` returns false when the key already exists.
let modified = precommitted
.set_if_absent(u64_key(precommit.info.sector_number), precommit)
.map_err(|e| {
e.downcast_wrap(format!("failed to store precommitment for {:?}", sector_no,))
})?;
if !modified {
// "pre-commited" typo kept as-is: error text is runtime-visible.
return Err(anyhow!("sector {} already pre-commited", sector_no));
}
}
self.pre_committed_sectors = precommitted.flush()?;
Ok(())
}
/// Looks up the pending pre-commitment for a single sector number, if any.
pub fn get_precommitted_sector<BS: Blockstore>(
    &self,
    store: &BS,
    sector_num: SectorNumber,
) -> Result<Option<SectorPreCommitOnChainInfo>, HamtError> {
    let map =
        make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?;
    let entry = map.get(&u64_key(sector_num))?;
    Ok(entry.cloned())
}
/// Looks up on-chain precommit info for each of the given sector numbers,
/// silently skipping numbers with no pending precommitment.
pub fn find_precommitted_sectors<BS: Blockstore>(
    &self,
    store: &BS,
    sector_numbers: &[SectorNumber],
) -> anyhow::Result<Vec<SectorPreCommitOnChainInfo>> {
    let precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>(
        &self.pre_committed_sectors,
        store,
        HAMT_BIT_WIDTH,
    )?;
    let mut result = Vec::with_capacity(sector_numbers.len());
    // Fixed mojibake: the loop pattern was garbled to `§or_number`
    // (`&sect` HTML entity); it is a by-value borrow pattern.
    for &sector_number in sector_numbers {
        let info = match precommitted.get(&u64_key(sector_number)).map_err(|e| {
            e.downcast_wrap(format!(
                "failed to load precommitment for {}",
                sector_number
            ))
        })? {
            Some(info) => info.clone(),
            // Absence is not an error: callers probe speculatively.
            None => continue,
        };
        result.push(info);
    }
    Ok(result)
}
/// Removes the pending pre-commitments for the given sector numbers.
/// Fails if any of them has no pending precommitment.
pub fn delete_precommitted_sectors<BS: Blockstore>(
    &mut self,
    store: &BS,
    sector_nums: &[SectorNumber],
) -> Result<(), HamtError> {
    let mut precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>(
        &self.pre_committed_sectors,
        store,
        HAMT_BIT_WIDTH,
    )?;
    // Fixed mojibake: the loop pattern was garbled to `§or_num`
    // (`&sect` HTML entity); it is a by-value borrow pattern.
    for &sector_num in sector_nums {
        let prev_entry = precommitted.delete(&u64_key(sector_num))?;
        if prev_entry.is_none() {
            return Err(format!("sector {} doesn't exist", sector_num).into());
        }
    }
    self.pre_committed_sectors = precommitted.flush()?;
    Ok(())
}
/// Whether a (proven) sector with the given number exists in the sectors AMT.
pub fn has_sector_number<BS: Blockstore>(
    &self,
    store: &BS,
    sector_num: SectorNumber,
) -> anyhow::Result<bool> {
    let found = Sectors::load(store, &self.sectors)?.get(sector_num)?;
    Ok(found.is_some())
}
/// Stores the given sector infos in the sectors AMT and persists the new root.
pub fn put_sectors<BS: Blockstore>(
&mut self,
store: &BS,
new_sectors: Vec<SectorOnChainInfo>,
) -> anyhow::Result<()> {
let mut sectors = Sectors::load(store, &self.sectors)
.map_err(|e| e.downcast_wrap("failed to load sectors"))?;
sectors.store(new_sectors)?;
self.sectors = sectors
.amt
.flush()
.map_err(|e| e.downcast_wrap("failed to persist sectors"))?;
Ok(())
}
/// Fetches the on-chain info for a single sector, or `None` if absent.
pub fn get_sector<BS: Blockstore>(
    &self,
    store: &BS,
    sector_num: SectorNumber,
) -> anyhow::Result<Option<SectorOnChainInfo>> {
    Sectors::load(store, &self.sectors)?.get(sector_num)
}
/// Deletes the given sector numbers from the sectors AMT; errors if any of
/// them does not exist. Persists the new AMT root on success.
pub fn delete_sectors<BS: Blockstore>(
&mut self,
store: &BS,
sector_nos: &BitField,
) -> Result<(), AmtError> {
let mut sectors = Sectors::load(store, &self.sectors)?;
for sector_num in sector_nos.iter() {
let deleted_sector = sectors
.amt
.delete(sector_num)
.map_err(|e| e.downcast_wrap("could not delete sector number"))?;
if deleted_sector.is_none() {
return Err(AmtError::Dynamic(Error::msg(format!(
"sector {} doesn't exist, failed to delete",
sector_num
))));
}
}
self.sectors = sectors.amt.flush()?;
Ok(())
}
/// Invokes `f` for every sector in the sectors AMT; stops at the first error.
pub fn for_each_sector<BS: Blockstore, F>(&self, store: &BS, mut f: F) -> anyhow::Result<()>
where
F: FnMut(&SectorOnChainInfo) -> anyhow::Result<()>,
{
let sectors = Sectors::load(store, &self.sectors)?;
sectors.amt.for_each(|_, v| f(v))?;
Ok(())
}
/// Locates a sector across all deadlines, returning the (deadline,
/// partition) indices where it resides.
pub fn find_sector<BS: Blockstore>(
    &self,
    store: &BS,
    sector_number: SectorNumber,
) -> anyhow::Result<(u64, u64)> {
    self.load_deadlines(store)?
        .find_sector(store, sector_number)
}
/// Reschedules the expirations of the given sectors (grouped by deadline
/// and partition) to the end of the next not-yet-elapsed occurrence of
/// their deadline, returning the sectors whose expirations were moved.
pub fn reschedule_sector_expirations<BS: Blockstore>(
    &mut self,
    policy: &Policy,
    store: &BS,
    current_epoch: ChainEpoch,
    sector_size: SectorSize,
    mut deadline_sectors: DeadlineSectorMap,
) -> anyhow::Result<Vec<SectorOnChainInfo>> {
    let mut deadlines = self.load_deadlines(store)?;
    let sectors = Sectors::load(store, &self.sectors)?;
    let mut all_replaced = Vec::new();
    for (deadline_idx, partition_sectors) in deadline_sectors.iter() {
        // Target the next instance of this deadline that has not yet elapsed.
        let deadline_info = new_deadline_info(
            policy,
            self.current_proving_period_start(policy, current_epoch),
            deadline_idx,
            current_epoch,
        )
        .next_not_elapsed();
        let new_expiration = deadline_info.last();
        let mut deadline = deadlines.load_deadline(store, deadline_idx)?;
        // Fixed mojibake: this argument was garbled to `§ors`
        // (`&sect` HTML entity); it is a borrow of the sectors array.
        let replaced = deadline.reschedule_sector_expirations(
            store,
            &sectors,
            new_expiration,
            partition_sectors,
            sector_size,
            deadline_info.quant_spec(),
        )?;
        all_replaced.extend(replaced);
        deadlines.update_deadline(policy, store, deadline_idx, &deadline)?;
    }
    self.save_deadlines(store, deadlines)?;
    Ok(all_replaced)
}
/// Distributes newly proven sectors across the deadlines that are currently
/// mutable, then persists the updated deadlines. Sectors are sorted by
/// number before assignment.
pub fn assign_sectors_to_deadlines<BS: Blockstore>(
&mut self,
policy: &Policy,
store: &BS,
current_epoch: ChainEpoch,
mut sectors: Vec<SectorOnChainInfo>,
partition_size: u64,
sector_size: SectorSize,
) -> anyhow::Result<()> {
let mut deadlines = self.load_deadlines(store)?;
sectors.sort_by_key(|info| info.sector_number);
// One slot per deadline; only mutable deadlines get populated.
let mut deadline_vec: Vec<Option<Deadline>> =
(0..policy.wpost_period_deadlines).map(|_| None).collect();
deadlines.for_each(store, |deadline_idx, deadline| {
if deadline_is_mutable(
policy,
self.current_proving_period_start(policy, current_epoch),
deadline_idx,
current_epoch,
) {
deadline_vec[deadline_idx as usize] = Some(deadline);
}
Ok(())
})?;
let deadline_to_sectors = assign_deadlines(
policy,
policy.max_partitions_per_deadline,
partition_size,
&deadline_vec,
sectors,
)?;
for (deadline_idx, deadline_sectors) in deadline_to_sectors.into_iter().enumerate() {
if deadline_sectors.is_empty() {
continue;
}
let quant = self.quant_spec_for_deadline(policy, deadline_idx as u64);
// Safe to unwrap: assign_deadlines only assigns to populated slots.
let deadline = deadline_vec[deadline_idx].as_mut().unwrap();
// New sectors start unproven until the first Window PoSt.
let proven = false;
deadline.add_sectors(
store,
partition_size,
proven,
&deadline_sectors,
sector_size,
quant,
)?;
deadlines.update_deadline(policy, store, deadline_idx as u64, deadline)?;
}
self.save_deadlines(store, deadlines)?;
Ok(())
}
/// Pops as many queued early terminations as fit within the given partition
/// and sector limits. Returns the accumulated result and whether more
/// terminations remain queued.
pub fn pop_early_terminations<BS: Blockstore>(
&mut self,
policy: &Policy,
store: &BS,
max_partitions: u64,
max_sectors: u64,
) -> anyhow::Result<(TerminationResult, bool)> {
// Nothing queued: return an empty result.
if self.early_terminations.is_empty() {
return Ok((Default::default(), false));
}
let mut deadlines = self.load_deadlines(store)?;
let mut result = TerminationResult::new();
// Deadline indices fully drained during this call.
let mut to_unset = Vec::new();
for i in self.early_terminations.iter() {
let deadline_idx = i;
let mut deadline = deadlines.load_deadline(store, deadline_idx)?;
let (deadline_result, more) = deadline
.pop_early_terminations(
store,
max_partitions - result.partitions_processed,
max_sectors - result.sectors_processed,
)
.map_err(|e| {
e.downcast_wrap(format!(
"failed to pop early terminations for deadline {}",
deadline_idx
))
})?;
result += deadline_result;
if !more {
to_unset.push(i);
}
deadlines.update_deadline(policy, store, deadline_idx, &deadline)?;
// Stop once either limit is reached.
if !result.below_limit(max_partitions, max_sectors) {
break;
}
}
// Unset after iteration: cannot mutate the bitfield while iterating it.
for deadline_idx in to_unset {
self.early_terminations.unset(deadline_idx);
}
self.save_deadlines(store, deadlines)?;
let no_early_terminations = self.early_terminations.is_empty();
Ok((result, !no_early_terminations))
}
/// Returns whether the given sector is active in the given partition:
/// present, not faulty, not terminated, and (when `require_proven`) proven.
/// Errors if the sector is not a member of the partition at all.
pub fn check_sector_active<BS: Blockstore>(
    &self,
    store: &BS,
    deadline_idx: u64,
    partition_idx: u64,
    sector_number: SectorNumber,
    require_proven: bool,
) -> anyhow::Result<bool> {
    let deadlines = self.load_deadlines(store)?;
    let deadline = deadlines.load_deadline(store, deadline_idx)?;
    let partition = deadline.load_partition(store, partition_idx)?;
    if !partition.sectors.get(sector_number) {
        return Err(actor_error_v12!(
            not_found;
            "sector {} not a member of partition {}, deadline {}",
            sector_number, partition_idx, deadline_idx
        )
        .into());
    }
    // Any of these conditions renders the sector inactive.
    let inactive = partition.faults.get(sector_number)
        || partition.terminated.get(sector_number)
        || (require_proven && partition.unproven.get(sector_number));
    Ok(!inactive)
}
/// Verifies that the sector is a healthy member of the given
/// partition/deadline: present, not faulty, and not terminated.
pub fn check_sector_health<BS: Blockstore>(
    &self,
    store: &BS,
    deadline_idx: u64,
    partition_idx: u64,
    sector_number: SectorNumber,
) -> anyhow::Result<()> {
    let deadlines = self.load_deadlines(store)?;
    let deadline = deadlines.load_deadline(store, deadline_idx)?;
    let partition = deadline.load_partition(store, partition_idx)?;
    if !partition.sectors.get(sector_number) {
        return Err(actor_error_v12!(
            not_found;
            "sector {} not a member of partition {}, deadline {}",
            sector_number, partition_idx, deadline_idx
        )
        .into());
    }
    // This branch previously repeated the membership error text
    // (copy-paste); it now reports the actual fault condition.
    if partition.faults.get(sector_number) {
        return Err(actor_error_v12!(
            forbidden;
            "sector {} of partition {}, deadline {} is faulty",
            sector_number, partition_idx, deadline_idx
        )
        .into());
    }
    if partition.terminated.get(sector_number) {
        return Err(actor_error_v12!(
            not_found;
            "sector {} not of partition {}, deadline {} is terminated",
            sector_number, partition_idx, deadline_idx
        )
        .into());
    }
    Ok(())
}
/// Loads the on-chain info for every sector number set in `sectors`.
pub fn load_sector_infos<BS: Blockstore>(
    &self,
    store: &BS,
    sectors: &BitField,
) -> anyhow::Result<Vec<SectorOnChainInfo>> {
    let all_sectors = Sectors::load(store, &self.sectors)?;
    let infos = all_sectors.load_sector(sectors)?;
    Ok(infos)
}
/// Loads the `Deadlines` structure from the store; errors if the CID is
/// missing or the load fails.
pub fn load_deadlines<BS: Blockstore>(&self, store: &BS) -> Result<Deadlines, ActorError> {
store
.get_cbor::<Deadlines>(&self.deadlines)
.map_err(|e| {
e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines")
})?
.ok_or_else(
|| actor_error_v12!(illegal_state; "failed to load deadlines {}", self.deadlines),
)
}
/// Persists the given deadlines and records the resulting CID.
pub fn save_deadlines<BS: Blockstore>(
    &mut self,
    store: &BS,
    deadlines: Deadlines,
) -> anyhow::Result<()> {
    let cid = store.put_cbor(&deadlines, Code::Blake2b256)?;
    self.deadlines = cid;
    Ok(())
}
/// Loads the vesting funds table from the store; errors if the CID is
/// missing or the load fails.
pub fn load_vesting_funds<BS: Blockstore>(&self, store: &BS) -> anyhow::Result<VestingFunds> {
Ok(store
.get_cbor(&self.vesting_funds)
.map_err(|e| {
e.downcast_wrap(format!("failed to load vesting funds {}", self.vesting_funds))
})?
.ok_or_else(
|| actor_error_v12!(not_found; "failed to load vesting funds {:?}", self.vesting_funds),
)?)
}
/// Persists the vesting funds table and records the resulting CID.
pub fn save_vesting_funds<BS: Blockstore>(
    &mut self,
    store: &BS,
    funds: &VestingFunds,
) -> anyhow::Result<()> {
    let cid = store.put_cbor(funds, Code::Blake2b256)?;
    self.vesting_funds = cid;
    Ok(())
}
/// Whether the deadline cron should keep running: true while any of the
/// miner's monetary obligations (deposits, pledge, locked funds) is nonzero.
pub fn continue_deadline_cron(&self) -> bool {
    let all_settled = self.pre_commit_deposits.is_zero()
        && self.initial_pledge.is_zero()
        && self.locked_funds.is_zero();
    !all_settled
}
/// Adjusts the pre-commit deposit total by `amount` (may be negative),
/// rejecting any adjustment that would make the total negative.
pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> anyhow::Result<()> {
    let updated = amount + &self.pre_commit_deposits;
    if updated.is_negative() {
        return Err(anyhow!(
            "negative pre-commit deposit {} after adding {} to prior {}",
            updated,
            amount,
            self.pre_commit_deposits
        ));
    }
    self.pre_commit_deposits = updated;
    Ok(())
}
/// Adjusts the initial pledge total by `amount` (may be negative),
/// rejecting any adjustment that would make the total negative.
pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> anyhow::Result<()> {
    let updated = amount + &self.initial_pledge;
    if updated.is_negative() {
        return Err(anyhow!(
            "negative initial pledge requirement {} after adding {} to prior {}",
            updated,
            amount,
            self.initial_pledge
        ));
    }
    self.initial_pledge = updated;
    Ok(())
}
/// Adds a non-negative penalty to the miner's fee debt.
pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> anyhow::Result<()> {
    if penalty.is_negative() {
        return Err(anyhow!("applying negative penalty {} not allowed", penalty));
    }
    self.fee_debt += penalty;
    Ok(())
}
/// First unlocks funds already vested by `current_epoch`, then locks
/// `vesting_sum` under the given vesting schedule. Returns the amount that
/// was unlocked.
pub fn add_locked_funds<BS: Blockstore>(
&mut self,
store: &BS,
current_epoch: ChainEpoch,
vesting_sum: &TokenAmount,
spec: &VestSpec,
) -> anyhow::Result<TokenAmount> {
if vesting_sum.is_negative() {
return Err(anyhow!("negative vesting sum {}", vesting_sum));
}
let mut vesting_funds = self.load_vesting_funds(store)?;
// Release anything that has finished vesting before locking more.
let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch);
self.locked_funds -= &amount_unlocked;
if self.locked_funds.is_negative() {
return Err(anyhow!(
"negative locked funds {} after unlocking {}",
self.locked_funds,
amount_unlocked
));
}
vesting_funds.add_locked_funds(current_epoch, vesting_sum, self.proving_period_start, spec);
self.locked_funds += vesting_sum;
self.save_vesting_funds(store, &vesting_funds)?;
Ok(amount_unlocked)
}
/// Repays as much fee debt as possible, drawing first from unvested funds
/// and then from the unlocked balance. Returns the amounts taken from
/// vesting and from the balance, in that order.
pub fn repay_partial_debt_in_priority_order<BS: Blockstore>(
&mut self,
store: &BS,
current_epoch: ChainEpoch,
curr_balance: &TokenAmount,
) -> Result<
(
TokenAmount, TokenAmount, ),
anyhow::Error,
> {
let unlocked_balance = self.get_unlocked_balance(curr_balance)?;
let fee_debt = self.fee_debt.clone();
// Draw from unvested funds first, capped at the outstanding debt.
let from_vesting = self.unlock_unvested_funds(store, current_epoch, &fee_debt)?;
if from_vesting > self.fee_debt {
return Err(anyhow!(
"should never unlock more than the debt we need to repay"
));
}
self.fee_debt -= &from_vesting;
// Then cover the remainder from the unlocked balance, as far as it goes.
let from_balance = cmp::min(&unlocked_balance, &self.fee_debt).clone();
self.fee_debt -= &from_balance;
Ok((from_vesting, from_balance))
}
/// Clears the entire fee debt, failing if the unlocked balance cannot cover
/// it. Returns the amount repaid.
pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> anyhow::Result<TokenAmount> {
    let unlocked_balance = self.get_unlocked_balance(curr_balance)?;
    if unlocked_balance >= self.fee_debt {
        Ok(std::mem::take(&mut self.fee_debt))
    } else {
        Err(actor_error_v12!(
            insufficient_funds,
            "unlocked balance can not repay fee debt ({} < {})",
            unlocked_balance,
            self.fee_debt
        )
        .into())
    }
}
/// Unlocks up to `target` from funds that have NOT yet vested (penalty
/// path), returning the amount actually unlocked.
pub fn unlock_unvested_funds<BS: Blockstore>(
&mut self,
store: &BS,
current_epoch: ChainEpoch,
target: &TokenAmount,
) -> anyhow::Result<TokenAmount> {
// Fast path: nothing requested or nothing locked.
if target.is_zero() || self.locked_funds.is_zero() {
return Ok(TokenAmount::zero());
}
let mut vesting_funds = self.load_vesting_funds(store)?;
let amount_unlocked = vesting_funds.unlock_unvested_funds(current_epoch, target);
self.locked_funds -= &amount_unlocked;
if self.locked_funds.is_negative() {
return Err(anyhow!(
"negative locked funds {} after unlocking {}",
self.locked_funds,
amount_unlocked
));
}
self.save_vesting_funds(store, &vesting_funds)?;
Ok(amount_unlocked)
}
/// Unlocks all funds whose vesting epoch has passed, returning the amount
/// unlocked.
pub fn unlock_vested_funds<BS: Blockstore>(
&mut self,
store: &BS,
current_epoch: ChainEpoch,
) -> anyhow::Result<TokenAmount> {
// Fast path: nothing locked.
if self.locked_funds.is_zero() {
return Ok(TokenAmount::zero());
}
let mut vesting_funds = self.load_vesting_funds(store)?;
let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch);
self.locked_funds -= &amount_unlocked;
if self.locked_funds.is_negative() {
return Err(anyhow!(
"vesting cause locked funds to become negative: {}",
self.locked_funds,
));
}
self.save_vesting_funds(store, &vesting_funds)?;
Ok(amount_unlocked)
}
/// Sums the funds that have vested by `current_epoch` without modifying
/// state. Relies on the vesting table being ordered by epoch.
pub fn check_vested_funds<BS: Blockstore>(
    &self,
    store: &BS,
    current_epoch: ChainEpoch,
) -> anyhow::Result<TokenAmount> {
    let vesting_funds = self.load_vesting_funds(store)?;
    let mut total = TokenAmount::zero();
    for fund in vesting_funds.funds.iter() {
        if fund.epoch >= current_epoch {
            break;
        }
        total += &fund.amount;
    }
    Ok(total)
}
/// Balance not committed to deposits, locked funds, or pledge; errors if
/// the obligations exceed the actor balance.
pub fn get_unlocked_balance(&self, actor_balance: &TokenAmount) -> anyhow::Result<TokenAmount> {
    let committed = &self.locked_funds + &self.pre_commit_deposits + &self.initial_pledge;
    let unlocked_balance = actor_balance - &committed;
    if unlocked_balance.is_negative() {
        return Err(anyhow!("negative unlocked balance {}", unlocked_balance));
    }
    Ok(unlocked_balance)
}
/// Unlocked balance minus outstanding fee debt; may be negative when debt
/// exceeds the unlocked balance.
pub fn get_available_balance(
    &self,
    actor_balance: &TokenAmount,
) -> anyhow::Result<TokenAmount> {
    let unlocked = self.get_unlocked_balance(actor_balance)?;
    Ok(unlocked - &self.fee_debt)
}
/// Sanity-checks the monetary invariants: none of the tracked amounts is
/// negative, and the actor balance covers deposits + locked funds + pledge.
pub fn check_balance_invariants(&self, balance: &TokenAmount) -> anyhow::Result<()> {
    if self.pre_commit_deposits.is_negative() {
        return Err(anyhow!(
            "pre-commit deposit is negative: {}",
            self.pre_commit_deposits
        ));
    }
    if self.locked_funds.is_negative() {
        return Err(anyhow!("locked funds is negative: {}", self.locked_funds));
    }
    if self.initial_pledge.is_negative() {
        return Err(anyhow!(
            "initial pledge is negative: {}",
            self.initial_pledge
        ));
    }
    if self.fee_debt.is_negative() {
        return Err(anyhow!("fee debt is negative: {}", self.fee_debt));
    }
    let min_balance = &self.pre_commit_deposits + &self.locked_funds + &self.initial_pledge;
    if balance < &min_balance {
        // This branch previously reported "fee debt is negative"
        // (copy-paste); it now reports the actual shortfall.
        return Err(anyhow!(
            "balance {} below required {}",
            balance,
            min_balance
        ));
    }
    Ok(())
}
/// Quantization spec that rounds to every deadline boundary (one challenge
/// window), offset by the proving period start.
pub fn quant_spec_every_deadline(&self, policy: &Policy) -> QuantSpec {
QuantSpec {
unit: policy.wpost_challenge_window,
offset: self.proving_period_start,
}
}
/// Enqueues (epoch, sector number) pairs onto the precommit cleanup queue,
/// quantized to deadline boundaries, and persists the new queue root.
pub fn add_pre_commit_clean_ups<BS: Blockstore>(
&mut self,
policy: &Policy,
store: &BS,
cleanup_events: Vec<(ChainEpoch, u64)>,
) -> anyhow::Result<()> {
let quant = self.quant_spec_every_deadline(policy);
let mut queue =
super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant)
.map_err(|e| e.downcast_wrap("failed to load pre-commit clean up queue"))?;
queue.add_many_to_queue_values(cleanup_events.into_iter())?;
self.pre_committed_sectors_cleanup = queue.amt.flush()?;
Ok(())
}
/// Removes pre-commitments whose cleanup epoch has passed, returning the
/// total deposit to burn for the expired precommits.
pub fn cleanup_expired_pre_commits<BS: Blockstore>(
&mut self,
policy: &Policy,
store: &BS,
current_epoch: ChainEpoch,
) -> anyhow::Result<TokenAmount> {
let mut deposit_to_burn = TokenAmount::zero();
let mut cleanup_queue = BitFieldQueue::new(
store,
&self.pre_committed_sectors_cleanup,
self.quant_spec_every_deadline(policy),
)?;
// Drain all entries scheduled at or before the current epoch.
let (sectors, modified) = cleanup_queue.pop_until(current_epoch)?;
if modified {
self.pre_committed_sectors_cleanup = cleanup_queue.amt.flush()?;
}
let mut precommits_to_delete = Vec::new();
for i in sectors.iter() {
let sector_number = i as SectorNumber;
let sector = match self.get_precommitted_sector(store, sector_number)? {
Some(sector) => sector,
// Already proven or deleted: nothing to clean up.
None => continue,
};
precommits_to_delete.push(sector_number);
deposit_to_burn += sector.pre_commit_deposit;
}
if !precommits_to_delete.is_empty() {
self.delete_precommitted_sectors(store, &precommits_to_delete)?;
}
self.pre_commit_deposits -= &deposit_to_burn;
if self.pre_commit_deposits.is_negative() {
return Err(anyhow!(
"pre-commit clean up caused negative deposits: {}",
self.pre_commit_deposits
));
}
Ok(deposit_to_burn)
}
/// Advances state to the next deadline: rotates `current_deadline` (and the
/// proving period when wrapping), processes the elapsed deadline's end
/// (missed-PoSt faults, expirations, early terminations), and reports the
/// resulting power/pledge deltas.
pub fn advance_deadline<BS: Blockstore>(
&mut self,
policy: &Policy,
store: &BS,
current_epoch: ChainEpoch,
) -> anyhow::Result<AdvanceDeadlineResult> {
let mut pledge_delta = TokenAmount::zero();
let dl_info = self.deadline_info(policy, current_epoch);
// Before the first proving period starts there is nothing to advance.
if !dl_info.period_started() {
return Ok(AdvanceDeadlineResult {
pledge_delta,
power_delta: PowerPair::zero(),
previously_faulty_power: PowerPair::zero(),
detected_faulty_power: PowerPair::zero(),
total_faulty_power: PowerPair::zero(),
});
}
// Rotate to the next deadline; a wrap starts a new proving period.
self.current_deadline = (dl_info.index + 1) % policy.wpost_period_deadlines;
if self.current_deadline == 0 {
self.proving_period_start = dl_info.period_start + policy.wpost_proving_period;
}
let mut deadlines = self.load_deadlines(store)?;
let mut deadline = deadlines.load_deadline(store, dl_info.index)?;
let previously_faulty_power = deadline.faulty_power.clone();
// No live sectors in this deadline: nothing to process.
if !deadline.is_live() {
return Ok(AdvanceDeadlineResult {
pledge_delta,
power_delta: PowerPair::zero(),
previously_faulty_power,
detected_faulty_power: PowerPair::zero(),
total_faulty_power: deadline.faulty_power,
});
}
let quant = quant_spec_for_deadline(policy, &dl_info);
let fault_expiration = dl_info.last() + policy.fault_max_age;
let (mut power_delta, detected_faulty_power) =
deadline.process_deadline_end(store, quant, fault_expiration, self.sectors)?;
let total_faulty_power = deadline.faulty_power.clone();
// Expire sectors scheduled to end at this deadline's close.
let expired = deadline.pop_expired_sectors(store, dl_info.last(), quant)?;
pledge_delta -= &expired.on_time_pledge;
self.add_initial_pledge(&expired.on_time_pledge.neg())?;
power_delta -= &expired.active_power;
// Early-terminated sectors are queued for later penalty processing.
let no_early_terminations = expired.early_sectors.is_empty();
if !no_early_terminations {
self.early_terminations.set(dl_info.index);
}
deadlines.update_deadline(policy, store, dl_info.index, &deadline)?;
self.save_deadlines(store, deadlines)?;
Ok(AdvanceDeadlineResult {
pledge_delta,
power_delta,
previously_faulty_power,
detected_faulty_power,
total_faulty_power,
})
}
/// Loads precommit info for every sector number in `sector_nos`; unlike
/// `find_precommitted_sectors`, a missing entry is an error.
pub fn get_all_precommitted_sectors<BS: Blockstore>(
&self,
store: &BS,
sector_nos: &BitField,
) -> anyhow::Result<Vec<SectorPreCommitOnChainInfo>> {
let mut precommits = Vec::new();
let precommitted =
make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?;
for sector_no in sector_nos.iter() {
if sector_no > MAX_SECTOR_NUMBER {
return Err(
actor_error_v12!(illegal_argument; "sector number greater than maximum").into(),
);
}
let info: &SectorPreCommitOnChainInfo = precommitted
.get(&u64_key(sector_no))?
.ok_or_else(|| actor_error_v12!(not_found, "sector {} not found", sector_no))?;
precommits.push(info.clone());
}
Ok(precommits)
}
}
/// Outcome of `State::advance_deadline`.
pub struct AdvanceDeadlineResult {
// Net change in pledge (negative when sectors expired on time).
pub pledge_delta: TokenAmount,
// Net change in active power from this deadline's processing.
pub power_delta: PowerPair,
// Power that was already faulty before this deadline was processed.
pub previously_faulty_power: PowerPair,
// Power newly detected faulty (e.g. missed Window PoSt).
pub detected_faulty_power: PowerPair,
// Total faulty power after processing.
pub total_faulty_power: PowerPair,
}
/// Static configuration and identity information for a miner actor.
/// Field order is consensus-relevant: `Serialize_tuple` encodes by position.
#[derive(Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)]
pub struct MinerInfo {
    /// Owner address of this miner.
    pub owner: Address,
    /// Worker address of this miner.
    pub worker: Address,
    /// Additional addresses that may control the miner.
    pub control_addresses: Vec<Address>,
    /// Pending worker-key change, if one has been proposed.
    pub pending_worker_key: Option<WorkerKeyChange>,
    /// Peer id, serialized as raw bytes.
    #[serde(with = "strict_bytes")]
    pub peer_id: Vec<u8>,
    /// Multiaddresses advertised for this miner.
    pub multi_address: Vec<BytesDe>,
    /// Window PoSt proof type the miner registered with.
    pub window_post_proof_type: RegisteredPoStProof,
    /// Sector size derived from the proof type.
    pub sector_size: SectorSize,
    /// Sectors per Window PoSt partition, derived from the proof type.
    pub window_post_partition_sectors: u64,
    /// Epoch of a recent consensus fault, or EPOCH_UNDEFINED if none.
    pub consensus_fault_elapsed: ChainEpoch,
    /// Pending owner-address change, if one has been proposed.
    pub pending_owner_address: Option<Address>,
    /// Beneficiary address (defaults to the owner at creation).
    pub beneficiary: Address,
    /// Quota/expiration terms governing the beneficiary.
    pub beneficiary_term: BeneficiaryTerm,
    /// Pending beneficiary change, if one has been proposed.
    pub pending_beneficiary_term: Option<PendingBeneficiaryChange>,
}
impl MinerInfo {
pub fn new(
owner: ActorID,
worker: ActorID,
control_addresses: Vec<ActorID>,
peer_id: Vec<u8>,
multi_address: Vec<BytesDe>,
window_post_proof_type: RegisteredPoStProof,
) -> Result<Self, ActorError> {
let sector_size = window_post_proof_type
.sector_size()
.map_err(|e| actor_error_v12!(illegal_argument, "invalid sector size: {}", e))?;
let window_post_partition_sectors = window_post_proof_type
.window_post_partitions_sector()
.map_err(|e| actor_error_v12!(illegal_argument, "invalid partition sectors: {}", e))?;
Ok(Self {
owner: Address::new_id(owner),
worker: Address::new_id(worker),
control_addresses: control_addresses
.into_iter()
.map(Address::new_id)
.collect_vec(),
pending_worker_key: None,
beneficiary: Address::new_id(owner),
beneficiary_term: BeneficiaryTerm::default(),
pending_beneficiary_term: None,
peer_id,
multi_address,
window_post_proof_type,
sector_size,
window_post_partition_sectors,
consensus_fault_elapsed: EPOCH_UNDEFINED,
pending_owner_address: None,
})
}
}