1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

use fvm_ipld_encoding::repr::*;
use fvm_ipld_encoding::tuple::*;
use fvm_shared::bigint::bigint_ser;
use fvm_shared::clock::{ChainEpoch, EPOCH_UNDEFINED};
use fvm_shared::econ::TokenAmount;

use fvm_shared::sector::{Spacetime, StoragePower};
use fvm_shared::smooth::FilterEstimate;
use lazy_static::lazy_static;
use num_derive::FromPrimitive;

use super::logic::*;

lazy_static! {
    /// 36.266260308195979333 FIL
    pub static ref INITIAL_REWARD_POSITION_ESTIMATE: TokenAmount = TokenAmount::from_atto(36266260308195979333u128);
    /// -1.09897758509*10^-7 FIL per epoch.  Change of simple minted tokens between epochs 0 and 1.
    /// (-109897758509 atto = -1.09897758509e-7 FIL; an earlier comment said -1.0982489e-7, which
    /// does not match the constant below.)
    pub static ref INITIAL_REWARD_VELOCITY_ESTIMATE: TokenAmount = TokenAmount::from_atto(-109897758509i64);
}

/// Reward actor state.
///
/// Serialized as a CBOR tuple (`Serialize_tuple`), so field order is part of
/// the on-chain encoding and must not change.
#[derive(Serialize_tuple, Deserialize_tuple, Default, Debug, Clone)]
pub struct State {
    /// Target `CumsumRealized` needs to reach for `EffectiveNetworkTime` to increase.
    /// Expressed in byte-epochs.
    #[serde(with = "bigint_ser")]
    pub cumsum_baseline: Spacetime,

    /// `CumsumRealized` is cumulative sum of network power capped by `BaselinePower(epoch)`.
    /// Expressed in byte-epochs.
    #[serde(with = "bigint_ser")]
    pub cumsum_realized: Spacetime,

    /// Ceiling of real effective network time `theta` based on
    /// `CumsumBaselinePower(theta) == CumsumRealizedPower`
    /// Theta captures the notion of how much the network has progressed in its baseline
    /// and in advancing network time.
    pub effective_network_time: ChainEpoch,

    /// `EffectiveBaselinePower` is the baseline power at the `EffectiveNetworkTime` epoch.
    #[serde(with = "bigint_ser")]
    pub effective_baseline_power: StoragePower,

    /// The reward to be paid in per `WinCount` to block producers.
    /// The actual reward total paid out depends on the number of winners in any round.
    /// This value is recomputed every non-null epoch and used in the next non-null epoch.
    pub this_epoch_reward: TokenAmount,
    /// Smoothed `this_epoch_reward`.
    pub this_epoch_reward_smoothed: FilterEstimate,

    /// The baseline power the network is targeting at `st.Epoch`.
    #[serde(with = "bigint_ser")]
    pub this_epoch_baseline_power: StoragePower,

    /// Epoch tracks for which epoch the Reward was computed.
    pub epoch: ChainEpoch,

    /// `TotalStoragePowerReward` tracks the total FIL awarded to block miners.
    pub total_storage_power_reward: TokenAmount,

    /// Simple and Baseline totals are constants used for computing rewards.
    /// They are on chain because of a historical fix resetting baseline value
    /// in a way that depended on the history leading immediately up to the
    /// migration fixing the value.  These values can be moved from state back
    /// into a code constant in a subsequent upgrade.
    pub simple_total: TokenAmount,
    /// See `simple_total`.
    pub baseline_total: TokenAmount,
}

impl State {
    pub fn new(curr_realized_power: StoragePower) -> Self {
        let mut st = Self {
            effective_baseline_power: BASELINE_INITIAL_VALUE.clone(),
            this_epoch_baseline_power: INIT_BASELINE_POWER.clone(),
            epoch: EPOCH_UNDEFINED,
            this_epoch_reward_smoothed: FilterEstimate::new(
                INITIAL_REWARD_POSITION_ESTIMATE.atto().clone(),
                INITIAL_REWARD_VELOCITY_ESTIMATE.atto().clone(),
            ),
            simple_total: SIMPLE_TOTAL.clone(),
            baseline_total: BASELINE_TOTAL.clone(),
            ..Default::default()
        };
        st.update_to_next_epoch_with_reward(&curr_realized_power);

        st
    }

    /// Takes in current realized power and updates internal state
    /// Used for update of internal state during null rounds
    pub(super) fn update_to_next_epoch(&mut self, curr_realized_power: &StoragePower) {
        self.epoch += 1;
        self.this_epoch_baseline_power = baseline_power_from_prev(&self.this_epoch_baseline_power);
        let capped_realized_power =
            std::cmp::min(&self.this_epoch_baseline_power, curr_realized_power);
        self.cumsum_realized += capped_realized_power;

        while self.cumsum_realized > self.cumsum_baseline {
            self.effective_network_time += 1;
            self.effective_baseline_power =
                baseline_power_from_prev(&self.effective_baseline_power);
            self.cumsum_baseline += &self.effective_baseline_power;
        }
    }

    /// Takes in a current realized power for a reward epoch and computes
    /// and updates reward state to track reward for the next epoch
    pub(super) fn update_to_next_epoch_with_reward(&mut self, curr_realized_power: &StoragePower) {
        let prev_reward_theta = compute_r_theta(
            self.effective_network_time,
            &self.effective_baseline_power,
            &self.cumsum_realized,
            &self.cumsum_baseline,
        );
        self.update_to_next_epoch(curr_realized_power);
        let curr_reward_theta = compute_r_theta(
            self.effective_network_time,
            &self.effective_baseline_power,
            &self.cumsum_realized,
            &self.cumsum_baseline,
        );

        self.this_epoch_reward = compute_reward(
            self.epoch,
            prev_reward_theta,
            curr_reward_theta,
            &self.simple_total,
            &self.baseline_total,
        );
    }

    pub fn into_total_storage_power_reward(self) -> TokenAmount {
        self.total_storage_power_reward
    }
}

/// Defines vesting function type for reward actor.
///
/// Serialized as a `u8` discriminant (`Serialize_repr`); the numeric values
/// are part of the encoding and must not change.
#[derive(Clone, Debug, PartialEq, Eq, Copy, FromPrimitive, Serialize_repr, Deserialize_repr)]
#[repr(u8)]
pub enum VestingFunction {
    /// No vesting schedule: the full value is available immediately.
    None = 0,
    /// Value vests linearly between a start and end epoch.
    Linear = 1,
}

/// A reward with an associated vesting schedule.
/// Serialized as a CBOR tuple; field order is part of the encoding.
#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)]
pub struct Reward {
    /// How the reward's value vests over time.
    pub vesting_function: VestingFunction,
    /// Epoch at which vesting begins (relevant for `Linear`).
    pub start_epoch: ChainEpoch,
    /// Epoch at which the value is fully vested (relevant for `Linear`).
    pub end_epoch: ChainEpoch,
    /// Total value of the reward.
    pub value: TokenAmount,
    /// Portion of the value already withdrawn.
    pub amount_withdrawn: TokenAmount,
}

impl Reward {
    pub fn amount_vested(&self, curr_epoch: ChainEpoch) -> TokenAmount {
        match self.vesting_function {
            VestingFunction::None => self.value.clone(),
            VestingFunction::Linear => {
                let elapsed = curr_epoch - self.start_epoch;
                let vest_duration = self.end_epoch - self.start_epoch;
                if elapsed >= vest_duration {
                    self.value.clone()
                } else {
                    (self.value.clone() * elapsed as u64).div_floor(vest_duration)
                }
            }
        }
    }
}