#include <subdev/clk.h>
#include <subdev/volt.h>
#include <subdev/timer.h>
#include <core/device.h>
#include <core/tegra.h>
#include "priv.h"
#include "gk20a.h"
#define GPCPLL_CFG_SYNC_MODE BIT(2)
#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
#define GPCPLL_CFG2_SDM_DIN_MASK \
(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
#define GPCPLL_CFG2_SETUP2_SHIFT 16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
#define GPCPLL_DVFS0_DFS_COEFF_MASK \
(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
struct gm20b_clk_dvfs_params {
s32 coeff_slope;
s32 coeff_offs;
u32 vco_ctrl;
};
static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
.coeff_slope = -165230,
.coeff_offs = 214007,
.vco_ctrl = 0x7 << 3,
};
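/*
 * base.n is the integer part of the N factor; sdm_din carries the
 * fractional part fed to the sigma-delta modulator.
 */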
struct gm20b_pll {
struct gk20a_pll base;
u32 sdm_din;
};
struct gm20b_clk_dvfs {
u32 dfs_coeff;
s32 dfs_det_max;
s32 dfs_ext_cal;
};
struct gm20b_clk {
	/* currently applied parameters */
	struct gk20a_clk base;
	struct gm20b_clk_dvfs dvfs;
	u32 uv;

	/* new parameters to apply */
	struct gk20a_pll new_pll;
	struct gm20b_clk_dvfs new_dvfs;
	u32 new_uv;

	const struct gm20b_clk_dvfs_params *dvfs_params;

	/* fused parameters */
	s32 uvdet_slope;
	s32 uvdet_offs;

	/* safe frequency we can use at minimum voltage */
	u32 safe_fmax_vmin;
};
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
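/* GM20B's post divider is linear, so PL values map 1:1 to divider values */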
static u32 pl_to_div(u32 pl)
{
return pl;
}
static u32 div_to_pl(u32 div)
{
return div;
}
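/* All frequencies in kHz */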
static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
.min_vco = 1300000, .max_vco = 2600000,
.min_u = 12000, .max_u = 38400,
.min_m = 1, .max_m = 255,
.min_n = 8, .max_n = 255,
.min_pl = 1, .max_pl = 31,
};
static void
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_device *device = subdev->device;
u32 val;
gk20a_pllg_read_mnp(&clk->base, &pll->base);
val = nvkm_rd32(device, GPCPLL_CFG2);
pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
}
static void
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
{
struct nvkm_device *device = clk->base.base.subdev.device;
nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
gk20a_pllg_write_mnp(&clk->base, &pll->base);
}
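/*
 * Determine DFS_COEFF for the requested voltage. Always select external
 * calibration override equal to the voltage, and set maximum detection
 * limit "0" (to make sure that PLL output remains under F/V curve when
 * voltage increases).
 */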
static void
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
struct gm20b_clk_dvfs *dvfs)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
	u32 coeff;
	/* Work in mV, as uV would likely overflow the 32-bit multiply below */
	s32 mv = DIV_ROUND_CLOSEST(uv, 1000);

	/* coeff = slope * voltage + offset */
	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
	coeff = DIV_ROUND_CLOSEST(coeff, 1000);
	dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));

	dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
					      clk->uvdet_slope);
	/* should never happen */
	if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
		nvkm_error(subdev, "dfs_ext_cal overflow!\n");

	dvfs->dfs_det_max = 0;
nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
__func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
dvfs->dfs_det_max);
}
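/*
 * Solve the equation for the integer and fractional part of the
 * effective NDIV:
 *
 * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
 *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
 *
 * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
 */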
static void
gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
const struct gk20a_clk_pllg_params *p = clk->base.params;
	/* n must be signed: the det_delta correction can drive it negative */
	s32 n;
	s32 det_delta;
	u32 rem, rem_range;

	/* calculate current ext_cal and subtract the previous one */
	det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
				      clk->uvdet_slope);
	det_delta -= clk->dvfs.dfs_ext_cal;
	det_delta = min(det_delta, clk->dvfs.dfs_det_max);
	det_delta *= clk->dvfs.dfs_coeff;

	/* calculate the new effective NDIV */
	n = (n_eff << DFS_DET_RANGE) - det_delta;
	/* should never happen! */
	if (n <= 0) {
		nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
		n = 1 << DFS_DET_RANGE;
	}
	if (n >> DFS_DET_RANGE > p->max_n) {
		nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
		n = p->max_n << DFS_DET_RANGE;
	}

	/* output the integer part of ndiv and 8 fractional bits */
	*n_int = n >> DFS_DET_RANGE;
	rem = ((u32)n) & MASK(DFS_DET_RANGE);
	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
	*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
n_eff, *n_int, *sdm_din);
}
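/*
 * Dynamically ramp the PLL to a new NDIV while it keeps running: program the
 * new fractional part into the SDM_DIN_NEW field, write the new integer NDIV,
 * then let the hardware ramp and wait for it to report completion.
 */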
static int
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_device *device = subdev->device;
struct gm20b_pll pll;
u32 n_int, sdm_din;
int ret = 0;
	/* calculate the new n_int/sdm_din for this n/uv */
	gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);

	/* get old coefficients */
	gm20b_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n_int == pll.base.n && sdm_din == pll.sdm_din)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp; in DVFS mode SDM is updated via "new" field */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
	pll.base.n = n_int;
	udelay(1);
	gk20a_pllg_write_mnp(&clk->base, &pll.base);

	/* dynamic ramp to the new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for the ramp to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* in DVFS mode complete the SDM update */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
return ret;
}
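/*
 * Enable the PLL and switch GPC2CLK to VCO mode. In DVFS mode the lock signal
 * cannot be used, so a fixed delay stands in for lock detection.
 */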
static int
gm20b_pllg_enable(struct gm20b_clk *clk)
{
struct nvkm_device *device = clk->base.base.subdev.device;
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* In DVFS mode lock cannot be used - so just delay */
	udelay(40);

	/* set SYNC_MODE for glitchless switch out of bypass */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
		  GPCPLL_CFG_SYNC_MODE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
return 0;
}
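/* Switch GPC2CLK back to bypass, then power the PLL down */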
static void
gm20b_pllg_disable(struct gm20b_clk *clk)
{
struct nvkm_device *device = clk->base.base.subdev.device;
	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
	/* clear SYNC_MODE before disabling PLL */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}
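/*
 * Program the PLL M/N/P coefficients. If only the post divider changes, the
 * transition can be done glitchlessly, without disabling the PLL.
 */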
static int
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_device *device = subdev->device;
struct gm20b_pll cur_pll;
u32 n_int, sdm_din;
	/* if we only change pdiv, we can do a glitchless transition */
	bool pdiv_only;
int ret;
gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
gm20b_pllg_read_mnp(clk, &cur_pll);
	pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
		    cur_pll.base.m == pll->m;

	/* need the full sequence if the clock is not enabled yet */
	if (!gk20a_pllg_is_enabled(&clk->base))
		pdiv_only = false;

	/* split the VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);
	if (pdiv_only) {
		u32 old = cur_pll.base.pl;
		u32 new = pll->pl;

		/*
		 * we can do a glitchless transition only if the old and new
		 * PL parameters share at least one bit set to 1. If this is
		 * not the case, calculate and program an interim PL that will
		 * allow us to respect that rule.
		 */
		if ((old & new) == 0) {
			cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
					      new | BIT(ffs(old) - 1));
			gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
		}

		cur_pll.base.pl = new;
		gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
	} else {
		/* disable before programming if more than pdiv changes */
		gm20b_pllg_disable(clk);
cur_pll.base = *pll;
cur_pll.base.n = n_int;
cur_pll.sdm_din = sdm_din;
gm20b_pllg_write_mnp(clk, &cur_pll);
ret = gm20b_pllg_enable(clk);
if (ret)
return ret;
}
	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
return 0;
}
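/*
 * Program the PLL using the NDIV slide whenever possible: slide down to
 * NDIV_LO, reprogram M/PL if needed, then slide up to the target NDIV.
 */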
static int
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
struct gk20a_pll cur_pll;
int ret;
	if (gk20a_pllg_is_enabled(&clk->base)) {
		gk20a_pllg_read_mnp(&clk->base, &cur_pll);

		/* just do the NDIV slide if M and PL do not change */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gm20b_pllg_slide(clk, pll->n);

		/* slide down to the current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
		ret = gm20b_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
	ret = gm20b_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to the new NDIV */
	return gm20b_pllg_slide(clk, pll->n);
}
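/*
 * Compute the PLL and DVFS parameters for the requested cstate. They are
 * only applied once gm20b_clk_prog() runs.
 */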
static int
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
struct gm20b_clk *clk = gm20b_clk(base);
struct nvkm_subdev *subdev = &base->subdev;
struct nvkm_volt *volt = base->subdev.device->volt;
int ret;
ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV, &clk->new_pll);
if (ret)
return ret;
clk->new_uv = volt->vid[cstate->voltage].uv;
gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
return 0;
}
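/*
 * Compute PLL parameters that are always safe for the current voltage
 */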
static void
gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
{
u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
u32 parent_rate = clk->base.parent_rate / KHZ;
u32 nmin, nsafe;
	/* remove a safe margin of 10% */
	if (rate > clk->safe_fmax_vmin)
		rate = rate * (100 - 10) / 100;

	/* gpc2clk */
	rate *= 2;

	nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
	/* rate and parent_rate are both in kHz here */
	nsafe = pll->m * rate / parent_rate;

	if (nsafe < nmin) {
		pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
		nsafe = nmin;
	}

	pll->n = nsafe;
}
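/*
 * Program a new DFS coefficient, asserting DFS_EXT_STROBE around the update.
 */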
static void
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
{
struct nvkm_device *device = clk->base.base.subdev.device;
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
udelay(1);
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
}
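/*
 * Program the external calibration value and make sure the external override
 * is enabled.
 */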
static void
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
{
struct nvkm_device *device = clk->base.base.subdev.device;
u32 val;
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
dfs_det_cal);
udelay(1);
val = nvkm_rd32(device, GPCPLL_DVFS1);
if (!(val & BIT(25))) {
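		/* Use external value to overwrite calibration value */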
val |= BIT(25) | BIT(16);
nvkm_wr32(device, GPCPLL_DVFS1, val);
}
}
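/*
 * Program the DFS coefficient and detection maximum in one go, then the
 * external calibration value.
 */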
static void
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
struct gm20b_clk_dvfs *dvfs)
{
struct nvkm_device *device = clk->base.base.subdev.device;
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
nvkm_mask(device, GPCPLL_DVFS0,
GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
udelay(1);
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
}
static int
gm20b_clk_prog(struct nvkm_clk *base)
{
struct gm20b_clk *clk = gm20b_clk(base);
u32 cur_freq;
int ret;
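	/* No change in DVFS settings? */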
if (clk->uv == clk->new_uv)
goto prog;
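	/*
	 * Interim step for changing DVFS detection settings: a frequency low
	 * enough to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by volt, but maximum
	 *   detection limit assures PLL output remains under F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by volt afterwards
	 *
	 * The interim step can be skipped if the old frequency is below the
	 * safe minimum, i.e., it is low enough to be safe at any voltage in
	 * the operating range with zero DVFS coefficient.
	 */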
cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
if (cur_freq > clk->safe_fmax_vmin) {
struct gk20a_pll pll_safe;
		if (clk->uv < clk->new_uv)
			/* voltage will raise: safe frequency is the current one */
			pll_safe = clk->base.pll;
		else
			/* voltage will drop: safe frequency is the new one */
			pll_safe = clk->new_pll;
gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
if (ret)
return ret;
}
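	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero
	 * - Set calibration level to new voltage
	 * - Set DVFS coefficient to match new voltage
	 */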
gm20b_dvfs_program_coeff(clk, 0);
gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
prog:
clk->uv = clk->new_uv;
clk->dvfs = clk->new_dvfs;
clk->base.pll = clk->new_pll;
return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
}
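/* gpcclk pstate table: frequencies in kHz, one voltage index per step */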
static struct nvkm_pstate
gm20b_pstates[] = {
{
.base = {
.domain[nv_clk_src_gpc] = 76800,
.voltage = 0,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 153600,
.voltage = 1,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 230400,
.voltage = 2,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 307200,
.voltage = 3,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 384000,
.voltage = 4,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 460800,
.voltage = 5,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 537600,
.voltage = 6,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 614400,
.voltage = 7,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 691200,
.voltage = 8,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 768000,
.voltage = 9,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 844800,
.voltage = 10,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 921600,
.voltage = 11,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 998400,
.voltage = 12,
},
},
};
static void
gm20b_clk_fini(struct nvkm_clk *base)
{
struct nvkm_device *device = base->subdev.device;
struct gm20b_clk *clk = gm20b_clk(base);
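	/* slide to VCO min */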
if (gk20a_pllg_is_enabled(&clk->base)) {
struct gk20a_pll pll;
u32 n_lo;
gk20a_pllg_read_mnp(&clk->base, &pll);
n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
gm20b_pllg_slide(clk, n_lo);
}
gm20b_pllg_disable(clk);
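	/* set IDDQ */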
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
static int
gm20b_clk_init_dvfs(struct gm20b_clk *clk)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_device *device = subdev->device;
bool fused = clk->uvdet_offs && clk->uvdet_slope;
	static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
u32 data;
int ret;
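	/* Enable NA DVFS */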
nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
GPCPLL_DVFS1_EN_DFS_BIT);
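	/* Set VCO_CTRL */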
if (clk->dvfs_params->vco_ctrl)
nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
	if (fused) {
		/* Start internal calibration, but ignore the results */
		nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

		/* got uvdet parameters from the fuses, skip calibration */
		goto calibrated;
	}

	/*
	 * If calibration parameters are not fused, start internal calibration,
	 * wait for completion, and use the result along with the default slope
	 * to compute the ADC offset.
	 */
nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
GPCPLL_DVFS1_EN_DFS_CAL_BIT);
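	/* wait for internal calibration done (timeout budget: 10 us) */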
ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
if (ret < 0) {
nvkm_error(subdev, "GPCPLL calibration timeout\n");
return -ETIMEDOUT;
}
data = nvkm_rd32(device, GPCPLL_CFG3) >>
GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
clk->uvdet_slope = ADC_SLOPE_UV;
clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
clk->uvdet_offs, clk->uvdet_slope);
calibrated:
gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
gm20b_dvfs_program_coeff(clk, 0);
gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
	/* use the parameters we just computed, not the pending new_dvfs ones */
	gm20b_dvfs_program_dfs_detection(clk, &clk->dvfs);
return 0;
}
static const struct nvkm_clk_func gm20b_clk;
static int
gm20b_clk_init(struct nvkm_clk *base)
{
struct gk20a_clk *clk = gk20a_clk(base);
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
int ret;
u32 data;
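	/* get out from IDDQ */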
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
nvkm_rd32(device, GPCPLL_CFG);
udelay(5);
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
GPC2CLK_OUT_INIT_VAL);
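	/* Set the global bypass control to VCO */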
nvkm_mask(device, BYPASSCTRL_SYS,
MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
0);
ret = gk20a_clk_setup_slide(clk);
if (ret)
return ret;
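	/* If not fused, set RAM SVOP PDP data to 0x2, and enable fuse override */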
data = nvkm_rd32(device, 0x021944);
if (!(data & 0x3)) {
data |= 0x2;
nvkm_wr32(device, 0x021944, data);
data = nvkm_rd32(device, 0x021948);
data |= 0x1;
nvkm_wr32(device, 0x021948, data);
}
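	/* Disable idle slow down */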
nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
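	/* speedo >= 1? */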
if (clk->base.func == &gm20b_clk) {
struct gm20b_clk *_clk = gm20b_clk(base);
struct nvkm_volt *volt = device->volt;
_clk->uv = nvkm_volt_get(volt);
ret = gm20b_clk_init_dvfs(_clk);
if (ret)
return ret;
}
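	/* Start with the lowest frequency */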
base->func->calc(base, &base->func->pstates[0].base);
ret = base->func->prog(base);
if (ret) {
nvkm_error(subdev, "cannot initialize clock\n");
return ret;
}
return 0;
}
static const struct nvkm_clk_func
gm20b_clk_speedo0 = {
.init = gm20b_clk_init,
.fini = gk20a_clk_fini,
.read = gk20a_clk_read,
.calc = gk20a_clk_calc,
.prog = gk20a_clk_prog,
.tidy = gk20a_clk_tidy,
.pstates = gm20b_pstates,
.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
{ nv_clk_src_max },
},
};
static const struct nvkm_clk_func
gm20b_clk = {
.init = gm20b_clk_init,
.fini = gm20b_clk_fini,
.read = gk20a_clk_read,
.calc = gm20b_clk_calc,
.prog = gm20b_clk_prog,
.tidy = gk20a_clk_tidy,
.pstates = gm20b_pstates,
.nr_pstates = ARRAY_SIZE(gm20b_pstates),
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
{ nv_clk_src_max },
},
};
static int
gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return -ENOMEM;
*pclk = &clk->base;
ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
return ret;
}
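/* ADC calibration fuse (FUSE_RESERVED_CALIB0) field layout */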
#define FUSE_RESERVED_CALIB0 0x204
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
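/*
 * Read the fused ADC calibration parameters (slope and intercept of the
 * voltage/code line). A fuse revision of 0 means nothing was fused.
 */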
static int
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
u32 val = 0;
u32 rev = 0;
#if IS_ENABLED(CONFIG_ARCH_TEGRA)
tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
#endif
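	/* No fused parameters, we will calibrate later */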
if (rev == 0)
return -EINVAL;
	/* integer part in mV + fractional part in uV */
	clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));

	/* integer part in mV + fractional part in 100uV */
	clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
clk->uvdet_slope, clk->uvdet_offs);
return 0;
}
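/*
 * Find the maximum frequency the GPU can run at its minimum voltage and
 * derive the threshold under which the PLL can always be reprogrammed safely.
 */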
static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_volt *volt = subdev->device->volt;
struct nvkm_pstate *pstates = clk->base.base.func->pstates;
int nr_pstates = clk->base.base.func->nr_pstates;
int vmin, id = 0;
u32 fmax = 0;
int i;
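	/* find the lowest voltage we can use */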
vmin = volt->vid[0].uv;
for (i = 1; i < volt->vid_nr; i++) {
if (volt->vid[i].uv <= vmin) {
vmin = volt->vid[i].uv;
id = volt->vid[i].vid;
}
}
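	/* find the highest frequency we can use at this voltage */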
for (i = 0; i < nr_pstates; i++)
if (pstates[i].base.voltage == id)
fmax = max(fmax,
pstates[i].base.domain[nv_clk_src_gpc]);
if (!fmax) {
nvkm_error(subdev, "failed to evaluate safe fmax\n");
return -EINVAL;
}
	/* we are safe at 90% of the max frequency */
	clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
	nvkm_debug(subdev, "safe fmax @ vmin = %u kHz\n", clk->safe_fmax_vmin);
return 0;
}
int
gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gm20b_clk *clk;
struct nvkm_subdev *subdev;
struct gk20a_clk_pllg_params *clk_params;
int ret;
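	/* speedo 0 parts use the simpler, non-DVFS clock implementation */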
if (tdev->gpu_speedo_id == 0)
return gm20b_clk_new_speedo0(device, type, inst, pclk);
clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
if (!clk)
return -ENOMEM;
*pclk = &clk->base.base;
subdev = &clk->base.base.subdev;
clk_params = (void *) (clk + 1);
*clk_params = gm20b_pllg_params;
ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
if (ret)
return ret;
	/*
	 * NAPLL can only work with max_u, clamp the m range so
	 * gk20a_pllg_calc_mnp always uses it
	 */
	clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
						(clk->base.parent_rate / KHZ));
if (clk_params->max_m == 0) {
nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
kfree(clk);
return gm20b_clk_new_speedo0(device, type, inst, pclk);
}
clk->base.pl_to_div = pl_to_div;
clk->base.div_to_pl = div_to_pl;
clk->dvfs_params = &gm20b_dvfs_params;
ret = gm20b_clk_init_fused_params(clk);
if (ret)
nvkm_warn(subdev, "no fused calibration parameters\n");
ret = gm20b_clk_init_safe_fmax(clk);
if (ret)
return ret;
return 0;
}