Bug Summary

File: dev/pci/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
Warning: line 80, column 3
Undefined or garbage value returned to caller
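
The warning points at dpp30_get_gamcor_current(): the local 'mode' is assigned only when state_mode reads back as 0 or 2, so any other value from CM_GAMCOR_CONTROL makes the function return an uninitialized enum. A minimal sketch of one possible fix, assuming it is acceptable to treat unknown register states as bypass (all names are taken from the listing below; this is illustrative only, not the upstream patch):

	/* sketch: give 'mode' a defined value on every return path */
	static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
	{
		enum dc_lut_mode mode = LUT_BYPASS;	/* default covers unexpected states */
		uint32_t state_mode;
		uint32_t lut_mode;
		struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

		REG_GET(CM_GAMCOR_CONTROL,
				CM_GAMCOR_MODE_CURRENT, &state_mode);

		if (state_mode == 2) {	/* programmable RAM LUT */
			REG_GET(CM_GAMCOR_CONTROL,
					CM_GAMCOR_SELECT_CURRENT, &lut_mode);
			mode = (lut_mode == 0) ? LUT_RAM_A : LUT_RAM_B;
		}

		return mode;
	}

Initializing at the declaration keeps every path defined without changing behaviour for the states the hardware is expected to report.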

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name dcn30_dpp_cm.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
1/*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "core_types.h"
28#include "reg_helper.h"
29#include "dcn30_dpp.h"
30#include "basics/conversion.h"
31#include "dcn30_cm_common.h"
32
 33#define REG(reg)\
34 dpp->tf_regs->reg
35
 36#define CTX \
37 dpp->base.ctx
38
39#undef FN
 40#define FN(reg_name, field_name) \
41 dpp->tf_shift->field_name, dpp->tf_mask->field_name
42
43static void dpp3_enable_cm_block(
44 struct dpp *dpp_base)
45{
46 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
47
48 unsigned int cm_bypass_mode = 0;
49
50 // debug option: put CM in bypass mode
51 if (dpp_base->ctx->dc->debug.cm_in_bypass)
52 cm_bypass_mode = 1;
53
54 REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_CONTROL
, 1, dpp->tf_shift->CM_BYPASS, dpp->tf_mask->CM_BYPASS
, cm_bypass_mode)
;
55}
56
57static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
58{
59 enum dc_lut_mode mode;
4
'mode' declared without an initial value
60 uint32_t state_mode;
61 uint32_t lut_mode;
62 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
63
64 REG_GET(CM_GAMCOR_CONTROL,generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, dpp->tf_shift->CM_GAMCOR_MODE_CURRENT, dpp->tf_mask
->CM_GAMCOR_MODE_CURRENT, &state_mode)
65 CM_GAMCOR_MODE_CURRENT, &state_mode)generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, dpp->tf_shift->CM_GAMCOR_MODE_CURRENT, dpp->tf_mask
->CM_GAMCOR_MODE_CURRENT, &state_mode)
;
66
67 if (state_mode == 0)
5
Assuming 'state_mode' is not equal to 0
6
Taking false branch
68 mode = LUT_BYPASS;
69
70 if (state_mode == 2) {//Programmable RAM LUT
7
Assuming 'state_mode' is not equal to 2
8
Taking false branch
71 REG_GET(CM_GAMCOR_CONTROL,generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, dpp->tf_shift->CM_GAMCOR_SELECT_CURRENT, dpp->tf_mask
->CM_GAMCOR_SELECT_CURRENT, &lut_mode)
72 CM_GAMCOR_SELECT_CURRENT, &lut_mode)generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, dpp->tf_shift->CM_GAMCOR_SELECT_CURRENT, dpp->tf_mask
->CM_GAMCOR_SELECT_CURRENT, &lut_mode)
;
73
74 if (lut_mode == 0)
75 mode = LUT_RAM_A;
76 else
77 mode = LUT_RAM_B;
78 }
79
80 return mode;
9
Undefined or garbage value returned to caller
81}
82
83static void dpp3_program_gammcor_lut(
84 struct dpp *dpp_base,
85 const struct pwl_result_data *rgb,
86 uint32_t num,
87 bool_Bool is_ram_a)
88{
89 uint32_t i;
90 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
91 uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
92 uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
93 uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
94
95 /*fill in the LUT with all base values to be used by pwl module
96 * HW auto increments the LUT index: back-to-back write
97 */
98 if (is_rgb_equal(rgb, num)) {
99 for (i = 0 ; i < num; i++)
100 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, rgb[i].red_reg)
;
101
102 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, last_base_value_red)
;
103
104 } else {
105 REG_UPDATE(CM_GAMCOR_LUT_CONTROL,generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4)
106 CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4)
;
107 for (i = 0 ; i < num; i++)
108 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, rgb[i].red_reg)
;
109
110 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, last_base_value_red)
;
111
112 REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_INDEX
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_INDEX, dpp->tf_mask
->CM_GAMCOR_LUT_INDEX, 0)
;
113
114 REG_UPDATE(CM_GAMCOR_LUT_CONTROL,generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2)
115 CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2)
;
116 for (i = 0 ; i < num; i++)
117 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, rgb[i].green_reg)
;
118
119 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, last_base_value_green)
;
120
121 REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_INDEX
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_INDEX, dpp->tf_mask
->CM_GAMCOR_LUT_INDEX, 0)
;
122
123 REG_UPDATE(CM_GAMCOR_LUT_CONTROL,generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1)
124 CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1)
;
125 for (i = 0 ; i < num; i++)
126 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, rgb[i].blue_reg)
;
127
128 REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_DATA
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_DATA, dpp->tf_mask
->CM_GAMCOR_LUT_DATA, last_base_value_blue)
;
129 }
130}
131
132static void dpp3_power_on_gamcor_lut(
133 struct dpp *dpp_base,
134 bool_Bool power_on)
135{
136 uint32_t power_status;
137 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
138
139
140 REG_SET(CM_MEM_PWR_CTRL, 0,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_MEM_PWR_CTRL
, 0, 1, dpp->tf_shift->GAMCOR_MEM_PWR_DIS, dpp->tf_mask
->GAMCOR_MEM_PWR_DIS, power_on == 1 ? 0:1)
141 GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_MEM_PWR_CTRL
, 0, 1, dpp->tf_shift->GAMCOR_MEM_PWR_DIS, dpp->tf_mask
->GAMCOR_MEM_PWR_DIS, power_on == 1 ? 0:1)
;
142
143 REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status)generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_MEM_PWR_STATUS
, dpp->tf_shift->GAMCOR_MEM_PWR_STATE, dpp->tf_mask->
GAMCOR_MEM_PWR_STATE, &power_status)
;
144 if (power_status != 0)
145 BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 145); do
{} while (0); } while (0)
;
146
147
148}
149
150void dpp3_program_cm_dealpha(
151 struct dpp *dpp_base,
152 uint32_t enable, uint32_t additive_blending)
153{
154 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
155
156 REG_SET_2(CM_DEALPHA, 0,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_DEALPHA
, 0, 2, dpp->tf_shift->CM_DEALPHA_EN, dpp->tf_mask->
CM_DEALPHA_EN, enable, dpp->tf_shift->CM_DEALPHA_ABLND,
dpp->tf_mask->CM_DEALPHA_ABLND, additive_blending)
157 CM_DEALPHA_EN, enable,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_DEALPHA
, 0, 2, dpp->tf_shift->CM_DEALPHA_EN, dpp->tf_mask->
CM_DEALPHA_EN, enable, dpp->tf_shift->CM_DEALPHA_ABLND,
dpp->tf_mask->CM_DEALPHA_ABLND, additive_blending)
158 CM_DEALPHA_ABLND, additive_blending)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_DEALPHA
, 0, 2, dpp->tf_shift->CM_DEALPHA_EN, dpp->tf_mask->
CM_DEALPHA_EN, enable, dpp->tf_shift->CM_DEALPHA_ABLND,
dpp->tf_mask->CM_DEALPHA_ABLND, additive_blending)
;
159}
160
161void dpp3_program_cm_bias(
162 struct dpp *dpp_base,
163 struct CM_bias_params *bias_params)
164{
165 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
166
167 REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_BIAS_CR_R
, 0, 1, dpp->tf_shift->CM_BIAS_CR_R, dpp->tf_mask->
CM_BIAS_CR_R, bias_params->cm_bias_cr_r)
;
168 REG_SET_2(CM_BIAS_Y_G_CB_B, 0,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_BIAS_Y_G_CB_B
, 0, 2, dpp->tf_shift->CM_BIAS_Y_G, dpp->tf_mask->
CM_BIAS_Y_G, bias_params->cm_bias_y_g, dpp->tf_shift->
CM_BIAS_CB_B, dpp->tf_mask->CM_BIAS_CB_B, bias_params->
cm_bias_cb_b)
169 CM_BIAS_Y_G, bias_params->cm_bias_y_g,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_BIAS_Y_G_CB_B
, 0, 2, dpp->tf_shift->CM_BIAS_Y_G, dpp->tf_mask->
CM_BIAS_Y_G, bias_params->cm_bias_y_g, dpp->tf_shift->
CM_BIAS_CB_B, dpp->tf_mask->CM_BIAS_CB_B, bias_params->
cm_bias_cb_b)
170 CM_BIAS_CB_B, bias_params->cm_bias_cb_b)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_BIAS_Y_G_CB_B
, 0, 2, dpp->tf_shift->CM_BIAS_Y_G, dpp->tf_mask->
CM_BIAS_Y_G, bias_params->cm_bias_y_g, dpp->tf_shift->
CM_BIAS_CB_B, dpp->tf_mask->CM_BIAS_CB_B, bias_params->
cm_bias_cb_b)
;
171}
172
173static void dpp3_gamcor_reg_field(
174 struct dcn3_dpp *dpp,
175 struct dcn3_xfer_func_reg *reg)
176{
177
178 reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
179 reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
180 reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
181 reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;
182
183 reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
184 reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
185 reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
186 reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
187 reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
188 reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
189 reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
190 reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
191
192 reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
193 reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
194 reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
195 reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
196 reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
197 reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
198 reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
199 reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
200 reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
201 reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
202 reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
203 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
204}
205
206static void dpp3_configure_gamcor_lut(
207 struct dpp *dpp_base,
208 bool_Bool is_ram_a)
209{
210 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
211
212 REG_UPDATE(CM_GAMCOR_LUT_CONTROL,generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7)
213 CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_WRITE_COLOR_MASK, dpp
->tf_mask->CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7)
;
214 REG_UPDATE(CM_GAMCOR_LUT_CONTROL,generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_HOST_SEL, dpp->tf_mask
->CM_GAMCOR_LUT_HOST_SEL, is_ram_a == 1 ? 0:1)
215 CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 0:1)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_LUT_HOST_SEL, dpp->tf_mask
->CM_GAMCOR_LUT_HOST_SEL, is_ram_a == 1 ? 0:1)
;
216 REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_LUT_INDEX
, 0, 1, dpp->tf_shift->CM_GAMCOR_LUT_INDEX, dpp->tf_mask
->CM_GAMCOR_LUT_INDEX, 0)
;
217}
218
219
220bool_Bool dpp3_program_gamcor_lut(
221 struct dpp *dpp_base, const struct pwl_params *params)
222{
223 enum dc_lut_mode current_mode;
224 enum dc_lut_mode next_mode;
225 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
226 struct dcn3_xfer_func_reg gam_regs;
227
228 dpp3_enable_cm_block(dpp_base);
229
230 if (params == NULL((void *)0)) { //bypass if we have no pwl data
1
Assuming 'params' is not equal to NULL
2
Taking false branch
231 REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMCOR_MODE, dpp->tf_mask->
CM_GAMCOR_MODE, 0)
;
232 return false0;
233 }
234 dpp3_power_on_gamcor_lut(dpp_base, true1);
235 REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMCOR_MODE, dpp->tf_mask->
CM_GAMCOR_MODE, 2)
;
236
237 current_mode = dpp30_get_gamcor_current(dpp_base);
3
Calling 'dpp30_get_gamcor_current'
238 if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
239 next_mode = LUT_RAM_B;
240 else
241 next_mode = LUT_RAM_A;
242
243 dpp3_power_on_gamcor_lut(dpp_base, true1);
244 dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A ? true1:false0);
245
246 if (next_mode == LUT_RAM_B) {
247 gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMB_START_CNTL_B;
248 gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMB_START_CNTL_G;
249 gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMB_START_CNTL_R;
250 gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMB_START_SLOPE_CNTL_B;
251 gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMB_START_SLOPE_CNTL_G;
252 gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMB_START_SLOPE_CNTL_R;
253 gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL1_B;
254 gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL2_B;
255 gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL1_G;
256 gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL2_G;
257 gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL1_R;
258 gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R)dpp->tf_regs->CM_GAMCOR_RAMB_END_CNTL2_R;
259 gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1)dpp->tf_regs->CM_GAMCOR_RAMB_REGION_0_1;
260 gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33)dpp->tf_regs->CM_GAMCOR_RAMB_REGION_32_33;
261 //New registers in DCN3AG/DCN GAMCOR block
262 gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B)dpp->tf_regs->CM_GAMCOR_RAMB_OFFSET_B;
263 gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G)dpp->tf_regs->CM_GAMCOR_RAMB_OFFSET_G;
264 gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R)dpp->tf_regs->CM_GAMCOR_RAMB_OFFSET_R;
265 gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMB_START_BASE_CNTL_B;
266 gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMB_START_BASE_CNTL_G;
267 gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMB_START_BASE_CNTL_R;
268 } else {
269 gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMA_START_CNTL_B;
270 gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMA_START_CNTL_G;
271 gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMA_START_CNTL_R;
272 gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMA_START_SLOPE_CNTL_B;
273 gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMA_START_SLOPE_CNTL_G;
274 gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMA_START_SLOPE_CNTL_R;
275 gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL1_B;
276 gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL2_B;
277 gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL1_G;
278 gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL2_G;
279 gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL1_R;
280 gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R)dpp->tf_regs->CM_GAMCOR_RAMA_END_CNTL2_R;
281 gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1)dpp->tf_regs->CM_GAMCOR_RAMA_REGION_0_1;
282 gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33)dpp->tf_regs->CM_GAMCOR_RAMA_REGION_32_33;
283 //New registers in DCN3AG/DCN GAMCOR block
284 gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B)dpp->tf_regs->CM_GAMCOR_RAMA_OFFSET_B;
285 gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G)dpp->tf_regs->CM_GAMCOR_RAMA_OFFSET_G;
286 gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R)dpp->tf_regs->CM_GAMCOR_RAMA_OFFSET_R;
287 gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B)dpp->tf_regs->CM_GAMCOR_RAMA_START_BASE_CNTL_B;
288 gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G)dpp->tf_regs->CM_GAMCOR_RAMA_START_BASE_CNTL_G;
289 gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R)dpp->tf_regs->CM_GAMCOR_RAMA_START_BASE_CNTL_R;
290 }
291
292 //get register fields
293 dpp3_gamcor_reg_field(dpp, &gam_regs);
294
295 //program register set for LUTA/LUTB
296 cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);
297
298 dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
299 next_mode == LUT_RAM_A ? true1:false0);
300
301 //select Gamma LUT to use for next frame
302 REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMCOR_CONTROL
, 1, dpp->tf_shift->CM_GAMCOR_SELECT, dpp->tf_mask->
CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1)
;
303
304 return true1;
305}
306
307void dpp3_set_hdr_multiplier(
308 struct dpp *dpp_base,
309 uint32_t multiplier)
310{
311 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
312
313 REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier)generic_reg_update_ex(dpp->base.ctx, dpp->tf_regs->CM_HDR_MULT_COEF
, 1, dpp->tf_shift->CM_HDR_MULT_COEF, dpp->tf_mask->
CM_HDR_MULT_COEF, multiplier)
;
314}
315
316
317static void program_gamut_remap(
318 struct dcn3_dpp *dpp,
319 const uint16_t *regval,
320 int select)
321{
322 uint16_t selection = 0;
323 struct color_matrices_reg gam_regs;
324
325 if (regval == NULL((void *)0) || select == GAMUT_REMAP_BYPASS) {
326 REG_SET(CM_GAMUT_REMAP_CONTROL, 0,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMUT_REMAP_MODE, dpp->tf_mask
->CM_GAMUT_REMAP_MODE, 0)
327 CM_GAMUT_REMAP_MODE, 0)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMUT_REMAP_MODE, dpp->tf_mask
->CM_GAMUT_REMAP_MODE, 0)
;
328 return;
329 }
330 switch (select) {
331 case GAMUT_REMAP_COEFF:
332 selection = 1;
333 break;
334 /*this corresponds to GAMUT_REMAP coefficients set B
335 *we don't have common coefficient sets in dcn3ag/dcn3
336 */
337 case GAMUT_REMAP_COMA_COEFF:
338 selection = 2;
339 break;
340 default:
341 break;
342 }
343
344 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
345 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
346 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
347 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
348
349
350 if (select == GAMUT_REMAP_COEFF) {
351 gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12)dpp->tf_regs->CM_GAMUT_REMAP_C11_C12;
352 gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34)dpp->tf_regs->CM_GAMUT_REMAP_C33_C34;
353
354 cm_helper_program_color_matrices(
355 dpp->base.ctx,
356 regval,
357 &gam_regs);
358
359 } else if (select == GAMUT_REMAP_COMA_COEFF) {
360
361 gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12)dpp->tf_regs->CM_GAMUT_REMAP_B_C11_C12;
362 gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34)dpp->tf_regs->CM_GAMUT_REMAP_B_C33_C34;
363
364 cm_helper_program_color_matrices(
365 dpp->base.ctx,
366 regval,
367 &gam_regs);
368
369 }
370 //select coefficient set to use
371 REG_SET(generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMUT_REMAP_MODE, dpp->tf_mask
->CM_GAMUT_REMAP_MODE, selection)
372 CM_GAMUT_REMAP_CONTROL, 0,generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMUT_REMAP_MODE, dpp->tf_mask
->CM_GAMUT_REMAP_MODE, selection)
373 CM_GAMUT_REMAP_MODE, selection)generic_reg_set_ex(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, 0, 1, dpp->tf_shift->CM_GAMUT_REMAP_MODE, dpp->tf_mask
->CM_GAMUT_REMAP_MODE, selection)
;
374}
375
376void dpp3_cm_set_gamut_remap(
377 struct dpp *dpp_base,
378 const struct dpp_grph_csc_adjustment *adjust)
379{
380 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base)({ const __typeof( ((struct dcn3_dpp *)0)->base ) *__mptr =
(dpp_base); (struct dcn3_dpp *)( (char *)__mptr - __builtin_offsetof
(struct dcn3_dpp, base) );})
;
381 int i = 0;
382 int gamut_mode;
383
384 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
385 /* Bypass if type is bypass or hw */
386 program_gamut_remap(dpp, NULL((void *)0), GAMUT_REMAP_BYPASS);
387 else {
388 struct fixed31_32 arr_matrix[12];
389 uint16_t arr_reg_val[12];
390
391 for (i = 0; i < 12; i++)
392 arr_matrix[i] = adjust->temperature_matrix[i];
393
394 convert_float_matrix(
395 arr_reg_val, arr_matrix, 12);
396
397 //current coefficient set in use
398 REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode)generic_reg_get(dpp->base.ctx, dpp->tf_regs->CM_GAMUT_REMAP_CONTROL
, dpp->tf_shift->CM_GAMUT_REMAP_MODE_CURRENT, dpp->tf_mask
->CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode)
;
399
400 if (gamut_mode == 0)
401 gamut_mode = 1; //use coefficient set A
402 else if (gamut_mode == 1)
403 gamut_mode = 2;
404 else
405 gamut_mode = 1;
406
407 //follow dcn2 approach for now - using only coefficient set A
408 program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
409 }
410}