author     Dave Airlie <airlied@redhat.com>    2018-03-26 10:01:11 +1000
committer  Dave Airlie <airlied@redhat.com>    2018-03-26 10:01:11 +1000
commit     33d009cd889490838c5db9b9339856c9e3d3facc (patch)
tree       a447078a59708c6b8ebe0a737a3be404ac98bd53 /drivers/gpu/drm/amd/amdgpu
parent     b4eec0fa537165efc3265cdbb4bac06e6bdaf596 (diff)
parent     09695ad78f1f5f315c7e9c5090f0c7b846a43690 (diff)
Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last pull for 4.17. Highlights:
- Vega12 support
- A few more bug fixes and cleanups for powerplay
* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (77 commits)
drm/amd/pp: clean header file hwmgr.h
drm/amd/pp: use mlck_table.count for array loop index limit
drm/amdgpu: Add an ATPX quirk for hybrid laptop
drm/amdgpu: fix spelling mistake: "asssert" -> "assert"
drm/amd/pp: Add new asic support in pp_psm.c
drm/amd/pp: Clean up powerplay code on Vega12
drm/amd/pp: Add smu irq handlers for legacy asics
drm/amd/pp: Fix set wrong temperature range on smu7
drm/amdgpu: Don't change preferred domian when fallback GTT v5
drm/amdgpu: Fix NULL ptr on driver unload due to init failure.
drm/amdgpu: fix "mitigate workaround for i915"
drm/amd/pp: Add smu irq handlers in sw_init instand of hw_init
drm/amd/pp: Refine register_thermal_interrupt function
drm/amdgpu: Remove wrapper layer of cgs irq handling
drm/amd/powerplay: Return per DPM level clock
drm/amd/powerplay: Remove the SOC floor voltage setting
drm/amdgpu: no job timeout setting on compute queues
drm/amdgpu: add vega12 pci ids (v2)
drm/amd/powerplay: add the hw manager for vega12 (v4)
drm/amd/powerplay: add the smu manager for vega12 (v4)
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
28 files changed, 643 insertions, 178 deletions
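As context for the diff below: the buffer-object allocation change from "drm/amdgpu: Don't change preferred domian when fallback GTT v5" removes the retry loop in amdgpu_gem_object_create() and folds the fallback into amdgpu_bo_do_create(), which now retries placement itself — first without AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, then with the allowed domains — instead of rewriting the BO's preferred domain. What follows is only a minimal, self-contained sketch of that retry order using stand-in names (try_place, DOMAIN_VRAM, DOMAIN_GTT, FLAG_CPU_ACCESS_REQUIRED); it is not the kernel code and simplifies the loop-termination check.

#include <stdio.h>

/* Stand-in domains/flags; the real driver uses AMDGPU_GEM_DOMAIN_* and
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED. */
#define DOMAIN_VRAM              0x1u
#define DOMAIN_GTT               0x2u
#define FLAG_CPU_ACCESS_REQUIRED 0x1u

/* Toy placement backend: pretend VRAM-only placements fail so both
 * fallback steps get exercised. */
static int try_place(unsigned int domains, unsigned int flags)
{
	(void)flags;
	if (domains == DOMAIN_VRAM)
		return -1;
	return 0;
}

/* Retry order modelled on the new amdgpu_bo_do_create(): first drop the
 * CPU-access requirement, then widen from the preferred to the allowed
 * domains, leaving the BO's preferred domain itself untouched. */
static int create_bo(unsigned int preferred, unsigned int allowed,
		     unsigned int flags)
{
	unsigned int domains = preferred;
	int r;

retry:
	r = try_place(domains, flags);
	if (r) {
		if (flags & FLAG_CPU_ACCESS_REQUIRED) {
			flags &= ~FLAG_CPU_ACCESS_REQUIRED;
			goto retry;
		} else if (domains != allowed) {
			domains = allowed;
			goto retry;
		}
	}
	return r;
}

int main(void)
{
	int r = create_bo(DOMAIN_VRAM, DOMAIN_VRAM | DOMAIN_GTT,
			  FLAG_CPU_ACCESS_REQUIRED);

	printf("create_bo returned %d\n", r);
	return 0;
}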
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index c53095b3b0fb..1ae5ae8c45a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 37098c68a645..71a57b2f7f04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -28,7 +28,6 @@ #include <linux/firmware.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" -#include "cgs_linux.h" #include "atom.h" #include "amdgpu_ucode.h" @@ -182,109 +181,6 @@ static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigne adev->mode_info.atom_context, table, args); } -struct cgs_irq_params { - unsigned src_id; - cgs_irq_source_set_func_t set; - cgs_irq_handler_func_t handler; - void *private_data; -}; - -static int cgs_set_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned type, - enum amdgpu_interrupt_state state) -{ - struct cgs_irq_params *irq_params = - (struct cgs_irq_params *)src->data; - if (!irq_params) - return -EINVAL; - if (!irq_params->set) - return -EINVAL; - return irq_params->set(irq_params->private_data, - irq_params->src_id, - type, - (int)state); -} - -static int cgs_process_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct cgs_irq_params *irq_params = - (struct cgs_irq_params *)source->data; - if (!irq_params) - return -EINVAL; - if (!irq_params->handler) - return -EINVAL; - return irq_params->handler(irq_params->private_data, - irq_params->src_id, - entry->iv_entry); -} - -static const struct amdgpu_irq_src_funcs cgs_irq_funcs = { - .set = cgs_set_irq_state, - .process = cgs_process_irq, -}; - -static int amdgpu_cgs_add_irq_source(void *cgs_device, - unsigned client_id, - unsigned src_id, - unsigned num_types, - cgs_irq_source_set_func_t set, - cgs_irq_handler_func_t handler, - void *private_data) -{ - CGS_FUNC_ADEV; - int ret = 0; - struct cgs_irq_params *irq_params; - struct amdgpu_irq_src *source = - kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); - if (!source) - return -ENOMEM; - irq_params = - kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL); - if (!irq_params) { - kfree(source); - return -ENOMEM; - } - source->num_types = num_types; - source->funcs = &cgs_irq_funcs; - irq_params->src_id = src_id; - irq_params->set = set; - irq_params->handler = handler; - irq_params->private_data = private_data; - source->data = (void *)irq_params; - ret = amdgpu_irq_add_id(adev, client_id, src_id, source); - if (ret) { - kfree(irq_params); - kfree(source); - } - - return ret; -} - -static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id, - unsigned src_id, unsigned type) -{ - CGS_FUNC_ADEV; - - if (!adev->irq.client[client_id].sources) - return -EINVAL; - - return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type); -} - -static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id, - unsigned src_id, unsigned type) -{ - CGS_FUNC_ADEV; - - if 
(!adev->irq.client[client_id].sources) - return -EINVAL; - - return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type); -} - static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state) @@ -654,6 +550,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, else strcpy(fw_name, "amdgpu/vega10_smc.bin"); break; + case CHIP_VEGA12: + strcpy(fw_name, "amdgpu/vega12_smc.bin"); + break; default: DRM_ERROR("SMC firmware not supported\n"); return -EINVAL; @@ -715,12 +614,9 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, return -EINVAL; mode_info = info->mode_info; - if (mode_info) { + if (mode_info) /* if the displays are off, vblank time is max */ mode_info->vblank_time_us = 0xffffffff; - /* always set the reference clock */ - mode_info->ref_clock = adev->clock.spll.reference_freq; - } if (!amdgpu_device_has_dc_support(adev)) { struct amdgpu_crtc *amdgpu_crtc; @@ -795,12 +691,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, }; -static const struct cgs_os_ops amdgpu_cgs_os_ops = { - .add_irq_source = amdgpu_cgs_add_irq_source, - .irq_get = amdgpu_cgs_irq_get, - .irq_put = amdgpu_cgs_irq_put -}; - struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) { struct amdgpu_cgs_device *cgs_device = @@ -812,7 +702,6 @@ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) } cgs_device->base.ops = &amdgpu_cgs_ops; - cgs_device->base.os_ops = &amdgpu_cgs_os_ops; cgs_device->adev = adev; return (struct cgs_device *)cgs_device; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 690cf77b950e..34af664b9f93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -59,6 +59,7 @@ #include "amdgpu_pm.h" MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -83,12 +84,21 @@ static const char *amdgpu_asic_name[] = { "POLARIS11", "POLARIS12", "VEGA10", + "VEGA12", "RAVEN", "LAST", }; static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); +/** + * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control + * + * @dev: drm_device pointer + * + * Returns true if the device is a dGPU with HG/PX power control, + * otherwise return false. + */ bool amdgpu_device_is_px(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -101,6 +111,15 @@ bool amdgpu_device_is_px(struct drm_device *dev) /* * MMIO register access helper functions. */ +/** + * amdgpu_mm_rreg - read a memory mapped IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @acc_flags: access flags which require special behavior + * + * Returns the 32 bit value from the offset specified. + */ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags) { @@ -129,6 +148,14 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, * */ +/** + * amdgpu_mm_rreg8 - read a memory mapped IO register + * + * @adev: amdgpu_device pointer + * @offset: byte aligned register offset + * + * Returns the 8 bit value from the offset specified. 
+ */ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) { if (offset < adev->rmmio_size) return (readb(adev->rmmio + offset)); @@ -141,6 +168,15 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) { * @value: the value want to be written to the register * */ +/** + * amdgpu_mm_wreg8 - read a memory mapped IO register + * + * @adev: amdgpu_device pointer + * @offset: byte aligned register offset + * @value: 8 bit value to write + * + * Writes the value specified to the offset specified. + */ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) { if (offset < adev->rmmio_size) writeb(value, adev->rmmio + offset); @@ -148,7 +184,16 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) BUG(); } - +/** + * amdgpu_mm_wreg - write to a memory mapped IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @v: 32 bit value to write to the register + * @acc_flags: access flags which require special behavior + * + * Writes the value specified to the offset specified. + */ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) { @@ -177,6 +222,14 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, } } +/** + * amdgpu_io_rreg - read an IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * + * Returns the 32 bit value from the offset specified. + */ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) { if ((reg * 4) < adev->rio_mem_size) @@ -187,6 +240,15 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) } } +/** + * amdgpu_io_wreg - write to an IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @v: 32 bit value to write to the register + * + * Writes the value specified to the offset specified. + */ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { @@ -355,6 +417,14 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, BUG(); } +/** + * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page + * + * @adev: amdgpu device pointer + * + * Allocates a scratch page of VRAM for use by various things in the + * driver. + */ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) { return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, @@ -364,6 +434,13 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) (void **)&adev->vram_scratch.ptr); } +/** + * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page + * + * @adev: amdgpu device pointer + * + * Frees the VRAM scratch page. + */ static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); @@ -405,6 +482,14 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, } } +/** + * amdgpu_device_pci_config_reset - reset the GPU + * + * @adev: amdgpu_device pointer + * + * Resets the GPU using the pci config reset sequence. + * Only applicable to asics prior to vega10. 
+ */ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) { pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); @@ -565,6 +650,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) /** * amdgpu_device_vram_location - try to find VRAM location + * * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @base: base address at which to put VRAM @@ -588,6 +674,7 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev, /** * amdgpu_device_gart_location - try to find GTT location + * * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -774,6 +861,16 @@ static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } +/** + * amdgpu_device_check_block_size - validate the vm block size + * + * @adev: amdgpu_device pointer + * + * Validates the vm block size specified via module parameter. + * The vm block size defines number of bits in page table versus page directory, + * a page is 4KB so we have 12 bits offset, minimum 9 bits in the + * page table and the remaining bits are in the page directory. + */ static void amdgpu_device_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, @@ -789,6 +886,14 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev) } } +/** + * amdgpu_device_check_vm_size - validate the vm size + * + * @adev: amdgpu_device pointer + * + * Validates the vm size in GB specified via module parameter. + * The VM size is the size of the GPU virtual memory space in GB. + */ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) { /* no need to check the default value */ @@ -923,6 +1028,17 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { .can_switch = amdgpu_switcheroo_can_switch, }; +/** + * amdgpu_device_ip_set_clockgating_state - set the CG state + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * @state: clockgating state (gate or ungate) + * + * Sets the requested clockgating state for all instances of + * the hardware IP specified. + * Returns the error code from the last instance. + */ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_clockgating_state state) @@ -945,6 +1061,17 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, return r; } +/** + * amdgpu_device_ip_set_powergating_state - set the PG state + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * @state: powergating state (gate or ungate) + * + * Sets the requested powergating state for all instances of + * the hardware IP specified. + * Returns the error code from the last instance. + */ int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_powergating_state state) @@ -967,6 +1094,17 @@ int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, return r; } +/** + * amdgpu_device_ip_get_clockgating_state - get the CG state + * + * @adev: amdgpu_device pointer + * @flags: clockgating feature flags + * + * Walks the list of IPs on the device and updates the clockgating + * flags for each IP. + * Updates @flags with the feature flags for each hardware IP where + * clockgating is enabled. 
+ */ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) { @@ -980,6 +1118,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, } } +/** + * amdgpu_device_ip_wait_for_idle - wait for idle + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Waits for the request hardware IP to be idle. + * Returns 0 for success or a negative error code on failure. + */ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -999,6 +1146,15 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, } +/** + * amdgpu_device_ip_is_idle - is the hardware IP idle + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Check if the hardware IP is idle or not. + * Returns true if it the IP is idle, false if not. + */ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -1014,6 +1170,15 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, } +/** + * amdgpu_device_ip_get_ip_block - get a hw IP pointer + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Returns a pointer to the hardware IP block structure + * if it exists for the asic, otherwise NULL. + */ struct amdgpu_ip_block * amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -1075,6 +1240,18 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, return 0; } +/** + * amdgpu_device_enable_virtual_display - enable virtual display feature + * + * @adev: amdgpu_device pointer + * + * Enabled the virtual display feature if the user has enabled it via + * the module parameter virtual_display. This feature provides a virtual + * display hardware on headless boards or in virtualized environments. + * This function parses and validates the configuration string specified by + * the user and configues the virtual display configuration (number of + * virtual connectors, crtcs, etc.) specified. + */ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) { adev->enable_virtual_display = false; @@ -1120,6 +1297,16 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) } } +/** + * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware + * + * @adev: amdgpu_device pointer + * + * Parses the asic configuration parameters specified in the gpu info + * firmware and makes them availale to the driver for use in configuring + * the asic. + * Returns 0 on success, -EINVAL on failure. + */ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) { const char *chip_name; @@ -1157,6 +1344,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_VEGA10: chip_name = "vega10"; break; + case CHIP_VEGA12: + chip_name = "vega12"; + break; case CHIP_RAVEN: chip_name = "raven"; break; @@ -1218,6 +1408,16 @@ out: return err; } +/** + * amdgpu_device_ip_early_init - run early init for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Early initialization pass for hardware IPs. The hardware IPs that make + * up each asic are discovered each IP's early_init callback is run. This + * is the first stage in initializing the asic. + * Returns 0 on success, negative error code on failure. 
+ */ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { int i, r; @@ -1270,8 +1470,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return r; break; #endif - case CHIP_VEGA10: - case CHIP_RAVEN: + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_RAVEN: if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; else @@ -1327,6 +1528,17 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_init - run init for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main initialization pass for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the sw_init and hw_init callbacks + * are run. sw_init initializes the software state associated with each IP + * and hw_init initializes the hardware associated with each IP. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_init(struct amdgpu_device *adev) { int i, r; @@ -1394,17 +1606,47 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer + * + * @adev: amdgpu_device pointer + * + * Writes a reset magic value to the gart pointer in VRAM. The driver calls + * this function before a GPU reset. If the value is retained after a + * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. + */ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) { memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); } +/** + * amdgpu_device_check_vram_lost - check if vram is valid + * + * @adev: amdgpu_device pointer + * + * Checks the reset magic value written to the gart pointer in VRAM. + * The driver calls this after a GPU reset to see if the contents of + * VRAM is lost or now. + * returns true if vram is lost, false if not. + */ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) { return !!memcmp(adev->gart.ptr, adev->reset_magic, AMDGPU_RESET_MAGIC_NUM); } +/** + * amdgpu_device_ip_late_set_cg_state - late init for clockgating + * + * @adev: amdgpu_device pointer + * + * Late initialization pass enabling clockgating for hardware IPs. + * The list of all the hardware IPs that make up the asic is walked and the + * set_clockgating_state callbacks are run. This stage is run late + * in the init process. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) { int i = 0, r; @@ -1432,6 +1674,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_late_init - run late init for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Late initialization pass for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the late_init callbacks are run. + * late_init covers any special initialization that an IP requires + * after all of the have been initialized or something that needs to happen + * late in the init process. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) { int i = 0, r; @@ -1458,6 +1712,17 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_fini - run fini for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main teardown pass for hardware IPs. 
The list of all the hardware + * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks + * are run. hw_fini tears down the hardware associated with each IP + * and sw_fini tears down any software state associated with each IP. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) { int i, r; @@ -1493,7 +1758,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* ungate blocks before hw fini so that we can shutdown the blocks safely */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_UNGATE); @@ -1514,8 +1780,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } - /* disable all interrupts */ - amdgpu_irq_disable_all(adev); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.sw) @@ -1552,6 +1816,15 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_late_init_func_handler - work handler for clockgating + * + * @work: work_struct + * + * Work handler for amdgpu_device_ip_late_set_cg_state. We put the + * clockgating setup into a worker thread to speed up driver init and + * resume from suspend. + */ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) { struct amdgpu_device *adev = @@ -1559,6 +1832,17 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) amdgpu_device_ip_late_set_cg_state(adev); } +/** + * amdgpu_device_ip_suspend - run suspend for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main suspend function for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked, clockgating is disabled and the + * suspend callbacks are run. suspend puts the hardware and software state + * in each IP into a state suitable for suspend. + * Returns 0 on success, negative error code on failure. + */ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int i, r; @@ -1667,6 +1951,18 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs + * + * @adev: amdgpu_device pointer + * + * First resume function for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the resume callbacks are run for + * COMMON, GMC, and IH. resume puts the hardware into a functional state + * after a suspend and updates the software state as necessary. This + * function is also used for restoring the GPU after a GPU reset. + * Returns 0 on success, negative error code on failure. 
+ */ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) { int i, r; @@ -1675,9 +1971,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.valid) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == - AMD_IP_BLOCK_TYPE_IH) { + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { DRM_ERROR("resume of IP block <%s> failed %d\n", @@ -1690,6 +1985,19 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs + * + * @adev: amdgpu_device pointer + * + * First resume function for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the resume callbacks are run for + * all blocks except COMMON, GMC, and IH. resume puts the hardware into a + * functional state after a suspend and updates the software state as + * necessary. This function is also used for restoring the GPU after a GPU + * reset. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) { int i, r; @@ -1698,8 +2006,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.valid) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ) + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) continue; r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { @@ -1712,6 +2020,18 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_resume - run resume for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main resume function for hardware IPs. The hardware IPs + * are split into two resume functions because they are + * are also used in in recovering from a GPU reset and some additional + * steps need to be take between them. In this case (S3/S4) they are + * run sequentially. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; @@ -1724,6 +2044,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) return r; } +/** + * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV + * + * @adev: amdgpu_device pointer + * + * Query the VBIOS data tables to determine if the board supports SR-IOV. + */ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { @@ -1740,6 +2067,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) } } +/** + * amdgpu_device_asic_has_dc_support - determine if DC supports the asic + * + * @asic_type: AMD asic type + * + * Check if there is DC (new modesetting infrastructre) support for an asic. + * returns true if DC has support, false if not. 
+ */ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { @@ -1760,6 +2095,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) return amdgpu_dc != 0; #endif case CHIP_VEGA10: + case CHIP_VEGA12: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: #endif @@ -2017,7 +2353,6 @@ fence_driver_init: } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - amdgpu_device_ip_fini(adev); goto failed; } @@ -2116,9 +2451,14 @@ void amdgpu_device_fini(struct amdgpu_device *adev) DRM_INFO("amdgpu: finishing device.\n"); adev->shutdown = true; - if (adev->mode_info.mode_config_initialized) - drm_crtc_force_disable_all(adev->ddev); - + /* disable all interrupts */ + amdgpu_irq_disable_all(adev); + if (adev->mode_info.mode_config_initialized){ + if (!amdgpu_device_has_dc_support(adev)) + drm_crtc_force_disable_all(adev->ddev); + else + drm_atomic_helper_shutdown(adev->ddev); + } amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_pm_sysfs_fini(adev); @@ -2378,6 +2718,16 @@ unlock: return r; } +/** + * amdgpu_device_ip_check_soft_reset - did soft reset succeed + * + * @adev: amdgpu_device pointer + * + * The list of all the hardware IPs that make up the asic is walked and + * the check_soft_reset callbacks are run. check_soft_reset determines + * if the asic is still hung or not. + * Returns true if any of the IPs are still in a hung state, false if not. + */ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) { int i; @@ -2400,6 +2750,17 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) return asic_hang; } +/** + * amdgpu_device_ip_pre_soft_reset - prepare for soft reset + * + * @adev: amdgpu_device pointer + * + * The list of all the hardware IPs that make up the asic is walked and the + * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset + * handles any IP specific hardware or software state changes that are + * necessary for a soft reset to succeed. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2418,6 +2779,15 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed + * + * @adev: amdgpu_device pointer + * + * Some hardware IPs cannot be soft reset. If they are hung, a full gpu + * reset is necessary to recover. + * Returns true if a full asic reset is required, false if not. + */ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) { int i; @@ -2439,6 +2809,17 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) return false; } +/** + * amdgpu_device_ip_soft_reset - do a soft reset + * + * @adev: amdgpu_device pointer + * + * The list of all the hardware IPs that make up the asic is walked and the + * soft_reset callbacks are run if the block is hung. soft_reset handles any + * IP specific hardware or software state changes that are necessary to soft + * reset the IP. + * Returns 0 on success, negative error code on failure. 
+ */ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2457,6 +2838,17 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_ip_post_soft_reset - clean up from soft reset + * + * @adev: amdgpu_device pointer + * + * The list of all the hardware IPs that make up the asic is walked and the + * post_soft_reset callbacks are run if the asic was hung. post_soft_reset + * handles any IP specific hardware or software state changes that are + * necessary after the IP has been soft reset. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2474,6 +2866,19 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers + * + * @adev: amdgpu_device pointer + * @ring: amdgpu_ring for the engine handling the buffer operations + * @bo: amdgpu_bo buffer whose shadow is being restored + * @fence: dma_fence associated with the operation + * + * Restores the VRAM buffer contents from the shadow in GTT. Used to + * restore things like GPUVM page tables after a GPU reset where + * the contents of VRAM might be lost. + * Returns 0 on success, negative error code on failure. + */ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, @@ -2509,6 +2914,16 @@ err: return r; } +/** + * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents + * + * @adev: amdgpu_device pointer + * + * Restores the contents of VRAM buffers from the shadows in GTT. Used to + * restore things like GPUVM page tables after a GPU reset where + * the contents of VRAM might be lost. + * Returns 0 on success, 1 on failure. + */ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -2562,17 +2977,17 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev) else DRM_ERROR("recover vram bo from shadow failed\n"); - return (r > 0?0:1); + return (r > 0) ? 0 : 1; } -/* +/** * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough * * @adev: amdgpu device pointer * * attempt to do soft-reset or full-reset and reinitialize Asic * return 0 means successed otherwise failed -*/ + */ static int amdgpu_device_reset(struct amdgpu_device *adev) { bool need_full_reset, vram_lost = 0; @@ -2642,15 +3057,16 @@ out: return r; } -/* +/** * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * * @adev: amdgpu device pointer * * do VF FLR and reinitialize Asic * return 0 means successed otherwise failed -*/ -static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor) + */ +static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, + bool from_hypervisor) { int r; @@ -2790,6 +3206,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, return r; } +/** + * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot + * + * @adev: amdgpu_device pointer + * + * Fetchs and stores in the driver the PCIE capabilities (gen speed + * and lanes) of the slot the device is in. Handles APUs and + * virtualized environments where PCIE config space may not be available. 
+ */ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) { u32 mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 2337d4bfd85c..7379aa5a6849 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -544,6 +544,12 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + /* Vega 12 */ + {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + {0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 008e1984b7e3..455a81e4c246 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -435,7 +435,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) { r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, - msecs_to_jiffies(amdgpu_lockup_timeout), ring->name); + (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ? + MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout), + ring->name); if (r) { DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 46b9ea4e6103..28c2706e48d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -56,23 +56,11 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } -retry: r = amdgpu_bo_create(adev, size, alignment, initial_domain, flags, type, resv, &bo); if (r) { - if (r != -ERESTARTSYS) { - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - goto retry; - } - - if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { - initial_domain |= AMDGPU_GEM_DOMAIN_GTT; - goto retry; - } - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", - size, initial_domain, alignment, r); - } + DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", + size, initial_domain, alignment, r); return r; } *obj = &bo->gem_base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 11dfe57bd8bb..3a5ca462abf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -259,6 +259,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) } } kfree(adev->irq.client[i].sources); + adev->irq.client[i].sources = NULL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e851c66cbb5e..4b7824d30e73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -190,6 +190,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->uvd.fw_version; fw_info->feature = 0; break; + case AMDGPU_INFO_FW_VCN: + fw_info->ver = adev->vcn.fw_version; + fw_info->feature = 0; + break; case AMDGPU_INFO_FW_GMC: 
fw_info->ver = adev->gmc.fw_version; fw_info->feature = 0; @@ -1198,6 +1202,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) i, fw_info.feature, fw_info.ver); } + /* VCN */ + query_fw.fw_type = AMDGPU_INFO_FW_VCN; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6d08cde8443c..fac4b6067efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -356,6 +356,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, struct amdgpu_bo *bo; unsigned long page_align; size_t acc_size; + u32 domains; int r; page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; @@ -417,12 +418,23 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, #endif bo->tbo.bdev = &adev->mman.bdev; - amdgpu_ttm_placement_from_domain(bo, domain); - + domains = bo->preferred_domains; +retry: + amdgpu_ttm_placement_from_domain(bo, domains); r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, &ctx, acc_size, NULL, resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r != 0)) + + if (unlikely(r && r != -ERESTARTSYS)) { + if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } else if (domains != bo->preferred_domains) { + domains = bo->allowed_domains; + goto retry; + } + } + if (unlikely(r)) return r; if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 1c9991738477..4b584cb75bf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -132,6 +132,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, { struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); long r; r = drm_gem_map_attach(dma_buf, target_dev, attach); @@ -143,7 +144,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, goto error_detach; - if (dma_buf->ops != &amdgpu_dmabuf_ops) { + if (attach->dev->driver != adev->dev->driver) { /* * Wait for all shared fences to complete before we switch to future * use of exclusive fence on this prime shared bo. 
@@ -162,7 +163,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, if (r) goto error_unreserve; - if (dma_buf->ops != &amdgpu_dmabuf_ops) + if (attach->dev->driver != adev->dev->driver) bo->prime_shared_count++; error_unreserve: @@ -179,6 +180,7 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf, { struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int ret = 0; ret = amdgpu_bo_reserve(bo, true); @@ -186,7 +188,7 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf, goto error; amdgpu_bo_unpin(bo); - if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count) + if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) bo->prime_shared_count--; amdgpu_bo_unreserve(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9a75410cd576..19e71f4a8ac2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -51,6 +51,7 @@ static int psp_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e28b73609fbc..205da3ff9cd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2021,7 +2021,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, return -EPERM; ptr = kmap(p); - r = copy_to_user(buf, ptr, bytes); + r = copy_to_user(buf, ptr + off, bytes); kunmap(p); if (r) return -EFAULT; @@ -2065,7 +2065,7 @@ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, return -EPERM; ptr = kmap(p); - r = copy_from_user(ptr, buf, bytes); + r = copy_from_user(ptr + off, buf, bytes); kunmap(p); if (r) return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 474f88fbafce..dd6f98921918 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -271,6 +271,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: case CHIP_RAVEN: + case CHIP_VEGA12: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index f3c459b7c0bb..627542b22ae4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -68,6 +68,7 @@ #define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin" #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" +#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) @@ -110,6 +111,7 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS11); MODULE_FIRMWARE(FIRMWARE_POLARIS12); MODULE_FIRMWARE(FIRMWARE_VEGA10); +MODULE_FIRMWARE(FIRMWARE_VEGA12); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -161,11 +163,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_POLARIS11: fw_name = FIRMWARE_POLARIS11; break; + case CHIP_POLARIS12: + fw_name = FIRMWARE_POLARIS12; + break; case CHIP_VEGA10: fw_name = FIRMWARE_VEGA10; break; - case CHIP_POLARIS12: - fw_name = FIRMWARE_POLARIS12; + case CHIP_VEGA12: + fw_name = FIRMWARE_VEGA12; break; default: return -EINVAL; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 9152478d7528..a33804bd3314 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -55,6 +55,7 @@ #define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin" #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin" +#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -72,6 +73,7 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS11); MODULE_FIRMWARE(FIRMWARE_POLARIS12); MODULE_FIRMWARE(FIRMWARE_VEGA10); +MODULE_FIRMWARE(FIRMWARE_VEGA12); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -127,11 +129,14 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_POLARIS11: fw_name = FIRMWARE_POLARIS11; break; + case CHIP_POLARIS12: + fw_name = FIRMWARE_POLARIS12; + break; case CHIP_VEGA10: fw_name = FIRMWARE_VEGA10; break; - case CHIP_POLARIS12: - fw_name = FIRMWARE_POLARIS12; + case CHIP_VEGA12: + fw_name = FIRMWARE_VEGA12; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 98d1dd253596..47ef3e6e7178 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6244,6 +6244,7 @@ static int ci_dpm_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->powerplay.pp_funcs = &ci_dpm_funcs; + adev->powerplay.pp_handle = adev; ci_dpm_set_irq_funcs(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 8201a0929ca2..b51f05dc9582 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle) /* no DCE */ break; case CHIP_VEGA10: + case CHIP_VEGA12: break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d1d2c27156b2..1ae3de1094f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -57,6 +57,13 @@ MODULE_FIRMWARE("amdgpu/vega10_mec.bin"); MODULE_FIRMWARE("amdgpu/vega10_mec2.bin"); MODULE_FIRMWARE("amdgpu/vega10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/vega12_ce.bin"); +MODULE_FIRMWARE("amdgpu/vega12_pfp.bin"); +MODULE_FIRMWARE("amdgpu/vega12_me.bin"); +MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); +MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); +MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); + MODULE_FIRMWARE("amdgpu/raven_ce.bin"); MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); MODULE_FIRMWARE("amdgpu/raven_me.bin"); @@ -144,7 +151,42 @@ static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) }; +static const struct soc15_reg_golden golden_settings_gc_9_2_1[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 
0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) +}; + +static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000) +}; + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 +#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); @@ -168,6 +210,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg10, ARRAY_SIZE(golden_settings_gc_9_0_vg10)); break; + case CHIP_VEGA12: + soc15_program_register_sequence(adev, + golden_settings_gc_9_2_1, + ARRAY_SIZE(golden_settings_gc_9_2_1)); + soc15_program_register_sequence(adev, + golden_settings_gc_9_2_1_vg12, + ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_gc_9_1, @@ -369,6 +419,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA10: chip_name = "vega10"; break; + case CHIP_VEGA12: + chip_name = "vega12"; + break; case CHIP_RAVEN: chip_name = "raven"; break; @@ -968,6 +1021,15 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_VEGA12: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; + DRM_INFO("fix gfx.config for vega12\n"); + break; case CHIP_RAVEN: adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -1249,6 +1311,7 @@ static int gfx_v9_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_RAVEN: adev->gfx.mec.num_mec = 2; break; @@ -3482,6 +3545,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_RAVEN: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); @@ -4453,6 +4517,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_RAVEN: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a70cbc45c4c1..e687363900bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -791,6 +791,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (amdgpu_gart_size == -1) { switch (adev->asic_type) { case CHIP_VEGA10: /* all engines support GPUVM */ + case CHIP_VEGA12: /* all engines support GPUVM */ default: adev->gmc.gart_size = 512ULL << 20; break; @@ -849,6 +850,7 @@ static int gmc_v9_0_sw_init(void *handle) } break; case CHIP_VEGA10: + case CHIP_VEGA12: /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Vega10, @@ -965,6 +967,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); break; + case CHIP_VEGA12: + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 81babe026529..26ba984ab2b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2963,6 +2963,7 @@ static int kv_dpm_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->powerplay.pp_funcs = &kv_dpm_funcs; + adev->powerplay.pp_handle = adev; kv_dpm_set_irq_funcs(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 3dd5816495a5..43f925773b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -733,6 +733,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_RAVEN: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 8fb933c62cf5..493348672475 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -129,7 +129,7 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); trn = xgpu_ai_peek_ack(adev); if (trn) { - pr_err("trn=%x ACK should not asssert! wait again !\n", trn); + pr_err("trn=%x ACK should not assert! 
wait again !\n", trn); msleep(1); } } while(trn); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 1cf34248dff4..6f9c54978cc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -220,12 +220,12 @@ static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev) static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev) { - return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX); + return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2); } static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev) { - return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA); + return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 690b9766d8ae..196e75def1f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -39,6 +39,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); +MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 @@ -107,6 +109,9 @@ static int psp_v3_1_init_microcode(struct psp_context *psp) case CHIP_VEGA10: chip_name = "vega10"; break; + case CHIP_VEGA12: + chip_name = "vega12"; + break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9448c45d1b60..2a8184082cd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -40,6 +40,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/vega12_sdma.bin"); +MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L @@ -84,6 +86,13 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) }; +static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) +}; + static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), @@ -122,6 +131,14 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg10, ARRAY_SIZE(golden_settings_sdma_vg10)); break; + case CHIP_VEGA12: + soc15_program_register_sequence(adev, + golden_settings_sdma_4, + ARRAY_SIZE(golden_settings_sdma_4)); + soc15_program_register_sequence(adev, + golden_settings_sdma_vg12, + ARRAY_SIZE(golden_settings_sdma_vg12)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, @@ -162,6 +179,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA10: chip_name = "vega10"; break; + case CHIP_VEGA12: + chip_name = "vega12"; + break; case CHIP_RAVEN: chip_name = "raven"; break; @@ -1489,6 +1509,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: + 
case CHIP_VEGA12: case CHIP_RAVEN: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1618,7 +1639,7 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * - * Copy GPU buffers using the DMA engine (VEGA10). + * Copy GPU buffers using the DMA engine (VEGA10/12). * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. */ @@ -1645,7 +1666,7 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib, * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * - * Fill GPU buffers using the DMA engine (VEGA10). + * Fill GPU buffers using the DMA engine (VEGA10/12). */ static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 3bfcf0d257ab..672eaffac0a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -7917,6 +7917,7 @@ static int si_dpm_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->powerplay.pp_funcs = &si_dpm_funcs; + adev->powerplay.pp_handle = adev; si_dpm_set_irq_funcs(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c6e857325b58..51cf8a30f6c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -508,6 +508,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) /* Set IP register base before any HW register access */ switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_RAVEN: vega10_reg_base_init(adev); break; @@ -527,6 +528,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); @@ -608,7 +610,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = static int soc15_common_early_init(void *handle) { - bool psp_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->smc_rreg = NULL; @@ -626,10 +627,6 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &soc15_asic_funcs; - if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) && - (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP))) - psp_enabled = true; - adev->rev_id = soc15_get_rev_id(adev); adev->external_rev_id = 0xFF; switch (adev->asic_type) { @@ -656,6 +653,28 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = 0x1; break; + case CHIP_VEGA12: + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x14; + break; case CHIP_RAVEN: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -888,6 +907,7 @@ static int soc15_common_set_clockgating_state(void *handle, 
switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA12: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index e7fb165cc9db..126f1276d347 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -896,7 +896,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = static int vi_common_early_init(void *handle) { - bool smc_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->flags & AMD_IS_APU) { @@ -917,10 +916,6 @@ static int vi_common_early_init(void *handle) adev->asic_funcs = &vi_asic_funcs; - if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && - (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) - smc_enabled = true; - adev->rev_id = vi_get_rev_id(adev); adev->external_rev_id = 0xFF; switch (adev->asic_type) { |
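For reference, the first hunk in this diff adds a fourth entry, { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, to the sentinel-terminated ATPX quirk list for a hybrid laptop. The sketch below models how such a quirk table is typically matched against a device's PCI and subsystem IDs; the lookup helper and its name px_quirks() are illustrative stand-ins, not the driver's actual matching code in amdgpu_atpx_handler.c.

#include <stdio.h>

#define PX_QUIRK_FORCE_ATPX 0x1u  /* stand-in for AMDGPU_PX_QUIRK_FORCE_ATPX */

struct px_quirk {
	unsigned short chip_vendor, chip_device;
	unsigned short subsys_vendor, subsys_device;
	unsigned int flags;
};

/* Same shape as amdgpu_px_quirk_list above, terminated by an all-zero
 * sentinel; the 0x67DF / 0x1028:0x0774 row is the entry this pull adds. */
static const struct px_quirk px_quirk_list[] = {
	{ 0x1002, 0x6900, 0x1002, 0x0124, PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0812, PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0813, PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x67DF, 0x1028, 0x0774, PX_QUIRK_FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },
};

/* Walk the sentinel-terminated list and return the quirk flags for a device,
 * matched on PCI vendor/device plus subsystem vendor/device IDs. */
static unsigned int px_quirks(unsigned short vendor, unsigned short device,
			      unsigned short sub_vendor, unsigned short sub_device)
{
	const struct px_quirk *q;

	for (q = px_quirk_list; q->chip_device != 0; q++) {
		if (q->chip_vendor == vendor && q->chip_device == device &&
		    q->subsys_vendor == sub_vendor &&
		    q->subsys_device == sub_device)
			return q->flags;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = px_quirks(0x1002, 0x67DF, 0x1028, 0x0774);

	printf("quirk flags: %#x (force ATPX: %s)\n",
	       flags, (flags & PX_QUIRK_FORCE_ATPX) ? "yes" : "no");
	return 0;
}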