Bug Summary

File: dev/pci/drm/amd/amdgpu/df_v3_6.c
Warning: line 470, column 21
The left operand of '==' is a garbage value
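
The path to the warning: df_v3_6_pmc_stop() declares lo_base_addr and
hi_base_addr without initial values and passes their addresses to
df_v3_6_pmc_get_ctrl_settings(). That helper calls df_v3_6_pmc_get_addr(),
which returns early without writing to either output pointer whenever
df_v3_6_pmc_config_2_cntr() finds no counter assigned to the config. The
sanity check at line 470 then reads whatever happened to be on the stack.
A minimal, self-contained sketch of the pattern (hypothetical names, not
driver code) that should trigger the same diagnostic under clang --analyze:

    #include <stdio.h>

    /* mirrors df_v3_6_pmc_get_addr(): the error path leaves the
     * out-parameters untouched */
    static void get_addr(int target_cntr, unsigned int *lo, unsigned int *hi)
    {
            if (target_cntr < 0)
                    return; /* early return: *lo and *hi are never written */
            *lo = 0x01d880;
            *hi = 0x01d884;
    }

    int main(void)
    {
            unsigned int lo, hi;    /* no initial value, like lo_base_addr */

            get_addr(-1, &lo, &hi);
            if (lo == 0 || hi == 0) /* left operand is a garbage value here */
                    return 1;
            printf("addr=%08x:%08x\n", lo, hi);
            return 0;
    }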

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name df_v3_6.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/df_v3_6.c
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "amdgpu.h"
24#include "df_v3_6.h"
25
26#include "df/df_3_6_default.h"
27#include "df/df_3_6_offset.h"
28#include "df/df_3_6_sh_mask.h"
29
30#define DF_3_6_SMN_REG_INST_DIST 0x8
31#define DF_3_6_INST_CNT 8
32
33static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
34 16, 32, 0, 0, 0, 2, 4, 8};
35
36#ifdef __linux__
37/* init df format attrs */
38AMDGPU_PMU_ATTR(event, "config:0-7");
39AMDGPU_PMU_ATTR(instance, "config:8-15");
40AMDGPU_PMU_ATTR(umask, "config:16-23");
41
42/* df format attributes */
43static struct attribute *df_v3_6_format_attrs[] = {
44 &pmu_attr_event.attr,
45 &pmu_attr_instance.attr,
46 &pmu_attr_umask.attr,
47 NULL
48};
49
50/* df format attribute group */
51static struct attribute_group df_v3_6_format_attr_group = {
52 .name = "format",
53 .attrs = df_v3_6_format_attrs,
54};
55
56/* df event attrs */
57AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
58 "event=0x7,instance=0x46,umask=0x2");
59AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
60 "event=0x7,instance=0x47,umask=0x2");
61AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
62 "event=0x7,instance=0x46,umask=0x4");
63AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
64 "event=0x7,instance=0x47,umask=0x4");
65AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
66 "event=0xb,instance=0x46,umask=0x4");
67AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
68 "event=0xb,instance=0x47,umask=0x4");
69AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
70 "event=0xb,instance=0x46,umask=0x8");
71AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
72 "event=0xb,instance=0x47,umask=0x8");
73
74/* df event attributes */
75static struct attribute *df_v3_6_event_attrs[] = {
76 &pmu_attr_cake0_pcsout_txdata.attr,
77 &pmu_attr_cake1_pcsout_txdata.attr,
78 &pmu_attr_cake0_pcsout_txmeta.attr,
79 &pmu_attr_cake1_pcsout_txmeta.attr,
80 &pmu_attr_cake0_ftiinstat_reqalloc.attr,
81 &pmu_attr_cake1_ftiinstat_reqalloc.attr,
82 &pmu_attr_cake0_ftiinstat_rspalloc.attr,
83 &pmu_attr_cake1_ftiinstat_rspalloc.attr,
84 NULL
85};
86
87/* df event attribute group */
88static struct attribute_group df_v3_6_event_attr_group = {
89 .name = "events",
90 .attrs = df_v3_6_event_attrs
91};
92
93/* df event attr groups */
94const struct attribute_group *df_v3_6_attr_groups[] = {
95 &df_v3_6_format_attr_group,
96 &df_v3_6_event_attr_group,
97 NULL
98};
99
100#endif /* __linux__ */
101
102static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
103 uint32_t ficaa_val)
104{
105 unsigned long flags, address, data;
106 uint32_t ficadl_val, ficadh_val;
107
108 address = adev->nbio.funcs->get_pcie_index_offset(adev);
109 data = adev->nbio.funcs->get_pcie_data_offset(adev);
110
111 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
112 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
113 WREG32(data, ficaa_val);
114
115 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
116 ficadl_val = RREG32(data);
117
118 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
119 ficadh_val = RREG32(data);
120
121 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
122
123 return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
124}
125
126static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
127 uint32_t ficadl_val, uint32_t ficadh_val)
128{
129 unsigned long flags, address, data;
130
131 address = adev->nbio.funcs->get_pcie_index_offset(adev);
132 data = adev->nbio.funcs->get_pcie_data_offset(adev);
133
134 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
135 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
136 WREG32(data, ficaa_val);
137
138 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
139 WREG32(data, ficadl_val);
140
141 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
142 WREG32(data, ficadh_val);
143
144 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
145}
146
147/*
148 * df_v3_6_perfmon_rreg - read perfmon lo and hi
149 *
150 * required to be atomic. no mmio method provided so subsequent reads for lo
151 * and hi require to preserve df finite state machine
152 */
153static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
154 uint32_t lo_addr, uint32_t *lo_val,
155 uint32_t hi_addr, uint32_t *hi_val)
156{
157 unsigned long flags, address, data;
158
159 address = adev->nbio.funcs->get_pcie_index_offset(adev);
160 data = adev->nbio.funcs->get_pcie_data_offset(adev);
161
162 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
163 WREG32(address, lo_addr);
164 *lo_val = RREG32(data);
165 WREG32(address, hi_addr);
166 *hi_val = RREG32(data);
167 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
168}
169
170/*
171 * df_v3_6_perfmon_wreg - write to perfmon lo and hi
172 *
173 * required to be atomic. no mmio method provided so subsequent reads after
174 * data writes cannot occur to preserve data fabrics finite state machine.
175 */
176static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
177 uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
178{
179 unsigned long flags, address, data;
180
181 address = adev->nbio.funcs->get_pcie_index_offset(adev);
182 data = adev->nbio.funcs->get_pcie_data_offset(adev);
183
184 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
185 WREG32(address, lo_addr);
186 WREG32(data, lo_val);
187 WREG32(address, hi_addr);
188 WREG32(data, hi_val);
189 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
190}
191
192/* same as perfmon_wreg but return status on write value check */
193static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
194 uint32_t lo_addr, uint32_t lo_val,
195 uint32_t hi_addr, uint32_t hi_val)
196{
197 unsigned long flags, address, data;
198 uint32_t lo_val_rb, hi_val_rb;
199
200 address = adev->nbio.funcs->get_pcie_index_offset(adev);
201 data = adev->nbio.funcs->get_pcie_data_offset(adev);
202
203 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
204 WREG32(address, lo_addr);
205 WREG32(data, lo_val);
206 WREG32(address, hi_addr);
207 WREG32(data, hi_val);
208
209 WREG32(address, lo_addr);
210 lo_val_rb = RREG32(data);
211 WREG32(address, hi_addr);
212 hi_val_rb = RREG32(data);
213 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
214
215 if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
216 return -EBUSY;
217
218 return 0;
219}
220
221
222/*
223 * retry arming counters every 100 usecs within 1 millisecond interval.
224 * if retry fails after time out, return error.
225 */
226#define ARM_RETRY_USEC_TIMEOUT 1000
227#define ARM_RETRY_USEC_INTERVAL 100
228static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
229 uint32_t lo_addr, uint32_t lo_val,
230 uint32_t hi_addr, uint32_t hi_val)
231{
232 int countdown = ARM_RETRY_USEC_TIMEOUT;
233
234 while (countdown) {
235
236 if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
237 hi_addr, hi_val))
238 break;
239
240 countdown -= ARM_RETRY_USEC_INTERVAL;
241 udelay(ARM_RETRY_USEC_INTERVAL);
242 }
243
244 return countdown > 0 ? 0 : -ETIME;
245}
246
247/* get the number of df counters available */
248static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
249 struct device_attribute *attr,
250 char *buf)
251{
252 struct amdgpu_device *adev;
253 struct drm_device *ddev;
254 int i, count;
255
256 ddev = dev_get_drvdata(dev);
257 adev = drm_to_adev(ddev);
258 count = 0;
259
260 for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
261 if (adev->df_perfmon_config_assign_mask[i] == 0)
262 count++;
263 }
264
265 return snprintf(buf, PAGE_SIZE, "%i\n", count);
266}
267
268/* device attr for available perfmon counters */
269static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
270
271static void df_v3_6_query_hashes(struct amdgpu_device *adev)
272{
273 u32 tmp;
274
275 adev->df.hash_status.hash_64k = false;
276 adev->df.hash_status.hash_2m = false;
277 adev->df.hash_status.hash_1g = false;
278
279 if (adev->asic_type != CHIP_ARCTURUS)
280 return;
281
282 /* encoding for hash-enabled on Arcturus */
283 if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
284 tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
285 adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
286 DF_CS_UMC_AON0_DfGlobalCtrl,
287 GlbHashIntlvCtl64K);
288 adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
289 DF_CS_UMC_AON0_DfGlobalCtrl,
290 GlbHashIntlvCtl2M);
291 adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
292 DF_CS_UMC_AON0_DfGlobalCtrl,
293 GlbHashIntlvCtl1G);
294 }
295}
296
297/* init perfmons */
298static void df_v3_6_sw_init(struct amdgpu_device *adev)
299{
300 int i, ret;
301
302 ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
303 if (ret)
304 DRM_ERROR("failed to create file for available df counters\n");
305
306 for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
307 adev->df_perfmon_config_assign_mask[i] = 0;
308
309 df_v3_6_query_hashes(adev);
310}
311
312static void df_v3_6_sw_fini(struct amdgpu_device *adev)
313{
314
315 device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
316
317}
318
319static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
320 bool enable)
321{
322 u32 tmp;
323
324 if (enable) {
325 tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
326 tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
327 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
328 } else
329 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
330 mmFabricConfigAccessControl_DEFAULT);
331}
332
333static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
334{
335 u32 tmp;
336
337 tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
338 tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
339 tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
340
341 return tmp;
342}
343
344static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
345{
346 int fb_channel_number;
347
348 fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
349 if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
350 fb_channel_number = 0;
351
352 return df_v3_6_channel_number[fb_channel_number];
353}
354
355static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
356 bool enable)
357{
358 u32 tmp;
359
360 if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
361 /* Put DF on broadcast mode */
362 adev->df.funcs->enable_broadcast_mode(adev, true);
363
364 if (enable) {
365 tmp = RREG32_SOC15(DF, 0,
366 mmDF_PIE_AON0_DfGlobalClkGater);
367 tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
368 tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
369 WREG32_SOC15(DF, 0,
370 mmDF_PIE_AON0_DfGlobalClkGater, tmp);
371 } else {
372 tmp = RREG32_SOC15(DF, 0,
373 mmDF_PIE_AON0_DfGlobalClkGater);
374 tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
375 tmp |= DF_V3_6_MGCG_DISABLE;
376 WREG32_SOC15(DF, 0,
377 mmDF_PIE_AON0_DfGlobalClkGater, tmp);
378 }
379
380 /* Exit broadcast mode */
381 adev->df.funcs->enable_broadcast_mode(adev, false);
382 }
383}
384
385static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
386 u32 *flags)
387{
388 u32 tmp;
389
390 /* AMD_CG_SUPPORT_DF_MGCG */
391 tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
392 if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
393 *flags |= AMD_CG_SUPPORT_DF_MGCG;
394}
395
396/* get assigned df perfmon ctr as int */
397static int df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
398 uint64_t config)
399{
400 int i;
401
402 for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
403 if ((config & 0x0FFFFFFUL) ==
404 adev->df_perfmon_config_assign_mask[i])
405 return i;
406 }
407
408 return -EINVAL;
409}
410
411/* get address based on counter assignment */
412static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
413 uint64_t config,
414 int is_ctrl,
415 uint32_t *lo_base_addr,
416 uint32_t *hi_base_addr)
417{
418 int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
419
420 if (target_cntr < 0)
5. Assuming 'target_cntr' is < 0
6. Taking true branch
421 return;
422
423 switch (target_cntr) {
424
425 case 0:
426 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
427 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
428 break;
429 case 1:
430 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
431 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
432 break;
433 case 2:
434 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
435 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
436 break;
437 case 3:
438 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
439 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
440 break;
441
442 }
443
444}
445
446/* get read counter address */
447static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
448 uint64_t config,
449 uint32_t *lo_base_addr,
450 uint32_t *hi_base_addr)
451{
452 df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
453}
454
455/* get control counter settings i.e. address and values to set */
456static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
457 uint64_t config,
458 uint32_t *lo_base_addr,
459 uint32_t *hi_base_addr,
460 uint32_t *lo_val,
461 uint32_t *hi_val,
462 bool is_enable)
463{
464
465 uint32_t eventsel, instance, unitmask;
466 uint32_t instance_10, instance_5432, instance_76;
467
468 df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
4. Calling 'df_v3_6_pmc_get_addr'
7. Returning from 'df_v3_6_pmc_get_addr'
469
470 if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
8. The left operand of '==' is a garbage value
471 DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
472 *lo_base_addr, *hi_base_addr);
473 return -ENXIO;
474 }
475
476 eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
477 unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
478 instance = DF_V3_6_GET_INSTANCE(config);
479
480 instance_10 = instance & 0x3;
481 instance_5432 = (instance >> 2) & 0xf;
482 instance_76 = (instance >> 6) & 0x3;
483
484 *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
485 *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
486 *hi_val = (instance_76 << 29) | instance_5432;
487
488 DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
489 config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);
490
491 return 0;
492}
493
494/* add df performance counters for read */
495static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
496 uint64_t config)
497{
498 int i, target_cntr;
499
500 target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
501
502 if (target_cntr >= 0)
503 return 0;
504
505 for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
506 if (adev->df_perfmon_config_assign_mask[i] == 0U) {
507 adev->df_perfmon_config_assign_mask[i] =
508 config & 0x0FFFFFFUL;
509 return 0;
510 }
511 }
512
513 return -ENOSPC;
514}
515
516#define DEFERRED_ARM_MASK (1 << 31)
517static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
518 uint64_t config, bool is_deferred)
519{
520 int target_cntr;
521
522 target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
523
524 if (target_cntr < 0)
525 return -EINVAL;
526
527 if (is_deferred)
528 adev->df_perfmon_config_assign_mask[target_cntr] |=
529 DEFERRED_ARM_MASK;
530 else
531 adev->df_perfmon_config_assign_mask[target_cntr] &=
532 ~DEFERRED_ARM_MASK;
533
534 return 0;
535}
536
537static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
538 uint64_t config)
539{
540 int target_cntr;
541
542 target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
543
544 /*
545 * we never get target_cntr < 0 since this function is only called in
546 * pmc_count for now but we should check anyways.
547 */
548 return (target_cntr >= 0 &&
549 (adev->df_perfmon_config_assign_mask[target_cntr]
550 & DEFERRED_ARM_MASK));
551
552}
553
554/* release performance counter */
555static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
556 uint64_t config)
557{
558 int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
559
560 if (target_cntr >= 0)
561 adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
562}
563
564
565static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
566 uint64_t config)
567{
568 uint32_t lo_base_addr = 0, hi_base_addr = 0;
569
570 df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
571 &hi_base_addr);
572
573 if ((lo_base_addr == 0) || (hi_base_addr == 0))
574 return;
575
576 df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
577}
578
579static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
580 int is_add)
581{
582 uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
583 int err = 0, ret = 0;
584
585 switch (adev->asic_type) {
586 case CHIP_VEGA20:
587 if (is_add)
588 return df_v3_6_pmc_add_cntr(adev, config);
589
590 df_v3_6_reset_perfmon_cntr(adev, config);
591
592 ret = df_v3_6_pmc_get_ctrl_settings(adev,
593 config,
594 &lo_base_addr,
595 &hi_base_addr,
596 &lo_val,
597 &hi_val,
598 true);
599
600 if (ret)
601 return ret;
602
603 err = df_v3_6_perfmon_arm_with_retry(adev,
604 lo_base_addr,
605 lo_val,
606 hi_base_addr,
607 hi_val);
608
609 if (err)
610 ret = df_v3_6_pmc_set_deferred(adev, config, true);
611
612 break;
613 default:
614 break;
615 }
616
617 return ret;
618}
619
620static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
621 int is_remove)
622{
623 uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
1. 'lo_base_addr' declared without an initial value
624 int ret = 0;
625
626 switch (adev->asic_type) {
2. Control jumps to 'case CHIP_VEGA20:' at line 627
627 case CHIP_VEGA20:
628 ret = df_v3_6_pmc_get_ctrl_settings(adev,
3. Calling 'df_v3_6_pmc_get_ctrl_settings'
629 config,
630 &lo_base_addr,
631 &hi_base_addr,
632 &lo_val,
633 &hi_val,
634 false);
635
636 if (ret)
637 return ret;
638
639
640 if (is_remove) {
641 df_v3_6_reset_perfmon_cntr(adev, config);
642 df_v3_6_pmc_release_cntr(adev, config);
643 }
644
645 break;
646 default:
647 break;
648 }
649
650 return ret;
651}
652
653static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
654 uint64_t config,
655 uint64_t *count)
656{
657 uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
658 *count = 0;
659
660 switch (adev->asic_type) {
661 case CHIP_VEGA20:
662 df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
663 &hi_base_addr);
664
665 if ((lo_base_addr == 0) || (hi_base_addr == 0))
666 return;
667
668 /* rearm the counter or throw away count value on failure */
669 if (df_v3_6_pmc_is_deferred(adev, config)) {
670 int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
671 lo_base_addr, lo_val,
672 hi_base_addr, hi_val);
673
674 if (rearm_err)
675 return;
676
677 df_v3_6_pmc_set_deferred(adev, config, false);
678 }
679
680 df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
681 hi_base_addr, &hi_val);
682
683 *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
684
685 if (*count >= DF_V3_6_PERFMON_OVERFLOW)
686 *count = 0;
687
688 DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
689 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
690
691 break;
692 default:
693 break;
694 }
695}
696
697const struct amdgpu_df_funcs df_v3_6_funcs = {
698 .sw_init = df_v3_6_sw_init,
699 .sw_fini = df_v3_6_sw_fini,
700 .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
701 .get_fb_channel_number = df_v3_6_get_fb_channel_number,
702 .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
703 .update_medium_grain_clock_gating =
704 df_v3_6_update_medium_grain_clock_gating,
705 .get_clockgating_state = df_v3_6_get_clockgating_state,
706 .pmc_start = df_v3_6_pmc_start,
707 .pmc_stop = df_v3_6_pmc_stop,
708 .pmc_get_count = df_v3_6_pmc_get_count,
709 .get_fica = df_v3_6_get_fica,
710 .set_fica = df_v3_6_set_fica,
711};
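
A conservative fix (a sketch only; this may not match how the issue was
ultimately resolved upstream) is to zero-initialize the locals in
df_v3_6_pmc_stop(), mirroring what df_v3_6_reset_perfmon_cntr() and
df_v3_6_pmc_get_count() already do, so the "== 0" check in
df_v3_6_pmc_get_ctrl_settings() always reads a defined value:

    static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                                int is_remove)
    {
            /* zeroed: df_v3_6_pmc_get_addr() can return without storing
             * to these, and the callee compares them against 0 */
            uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
            int ret = 0;

            /* ... body unchanged, see lines 626-651 above ... */
    }

Note that df_v3_6_pmc_start() passes the same uninitialized locals to
df_v3_6_pmc_get_ctrl_settings() and would likely warrant the same treatment.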