drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c  +56 −51

@@ -37,11 +37,12 @@ static int
 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 	      u32 process, u32 message, u32 data0, u32 data1)
 {
-	struct nvkm_subdev *subdev = nv_subdev(pmu);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
 	/* wait for a free slot in the fifo */
-	addr = nv_rd32(pmu, 0x10a4a0);
+	addr = nvkm_rd32(device, 0x10a4a0);
 	if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
 		return -EBUSY;

@@ -57,20 +58,20 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000001);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+		nvkm_wr32(device, 0x10a580, 0x00000001);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
 
 	/* write the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
 				pmu->send.base));
-	nv_wr32(pmu, 0x10a1c4, process);
-	nv_wr32(pmu, 0x10a1c4, message);
-	nv_wr32(pmu, 0x10a1c4, data0);
-	nv_wr32(pmu, 0x10a1c4, data1);
-	nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+	nvkm_wr32(device, 0x10a1c4, process);
+	nvkm_wr32(device, 0x10a1c4, message);
+	nvkm_wr32(device, 0x10a1c4, data0);
+	nvkm_wr32(device, 0x10a1c4, data1);
+	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wait for reply, if requested */
 	if (reply) {

@@ -87,29 +88,30 @@ static void
 nvkm_pmu_recv(struct work_struct *work)
 {
 	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+	struct nvkm_device *device = pmu->subdev.device;
 	u32 process, message, data0, data1;
 
 	/* nothing to do if GET == PUT */
-	u32 addr = nv_rd32(pmu, 0x10a4cc);
-	if (addr == nv_rd32(pmu, 0x10a4c8))
+	u32 addr = nvkm_rd32(device, 0x10a4cc);
+	if (addr == nvkm_rd32(device, 0x10a4c8))
 		return;
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000002);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+		nvkm_wr32(device, 0x10a580, 0x00000002);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
 				pmu->recv.base));
-	process = nv_rd32(pmu, 0x10a1c4);
-	message = nv_rd32(pmu, 0x10a1c4);
-	data0 = nv_rd32(pmu, 0x10a1c4);
-	data1 = nv_rd32(pmu, 0x10a1c4);
-	nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+	process = nvkm_rd32(device, 0x10a1c4);
+	message = nvkm_rd32(device, 0x10a1c4);
+	data0 = nvkm_rd32(device, 0x10a1c4);
+	data1 = nvkm_rd32(device, 0x10a1c4);
+	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wake process if it's waiting on a synchronous reply */
 	if (pmu->recv.process) {

@@ -137,36 +139,37 @@ nvkm_pmu_recv(struct work_struct *work)
 static void
 nvkm_pmu_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)subdev;
-	u32 disp = nv_rd32(pmu, 0x10a01c);
-	u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+	struct nvkm_pmu *pmu = container_of(subdev, typeof(*pmu), subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	u32 disp = nvkm_rd32(device, 0x10a01c);
+	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
 
 	if (intr & 0x00000020) {
-		u32 stat = nv_rd32(pmu, 0x10a16c);
+		u32 stat = nvkm_rd32(device, 0x10a16c);
 		if (stat & 0x80000000) {
 			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-				 stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
-			nv_wr32(pmu, 0x10a16c, 0x00000000);
+				 stat & 0x00ffffff, nvkm_rd32(device, 0x10a168));
+			nvkm_wr32(device, 0x10a16c, 0x00000000);
 			intr &= ~0x00000020;
 		}
 	}
 
 	if (intr & 0x00000040) {
 		schedule_work(&pmu->recv.work);
-		nv_wr32(pmu, 0x10a004, 0x00000040);
+		nvkm_wr32(device, 0x10a004, 0x00000040);
 		intr &= ~0x00000040;
 	}
 
 	if (intr & 0x00000080) {
-		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
-						     nv_rd32(pmu, 0x10a7a4));
-		nv_wr32(pmu, 0x10a004, 0x00000080);
+		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nvkm_rd32(device, 0x10a7a0),
+						     nvkm_rd32(device, 0x10a7a4));
+		nvkm_wr32(device, 0x10a004, 0x00000080);
 		intr &= ~0x00000080;
 	}
 
 	if (intr) {
 		nv_error(pmu, "intr 0x%08x\n", intr);
-		nv_wr32(pmu, 0x10a004, intr);
+		nvkm_wr32(device, 0x10a004, intr);
 	}
 }

@@ -174,8 +177,9 @@ int
 _nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
 {
 	struct nvkm_pmu *pmu = (void *)object;
+	struct nvkm_device *device = pmu->subdev.device;
 
-	nv_wr32(pmu, 0x10a014, 0x00000060);
+	nvkm_wr32(device, 0x10a014, 0x00000060);
 	flush_work(&pmu->recv.work);
 
 	return nvkm_subdev_fini(&pmu->subdev, suspend);

@@ -186,6 +190,7 @@ _nvkm_pmu_init(struct nvkm_object *object)
 {
 	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
 	struct nvkm_pmu *pmu = (void *)object;
+	struct nvkm_device *device = pmu->subdev.device;
 	int ret, i;
 
 	ret = nvkm_subdev_init(&pmu->subdev);

@@ -197,44 +202,44 @@ _nvkm_pmu_init(struct nvkm_object *object)
 	pmu->pgob = nvkm_pmu_pgob;
 
 	/* prevent previous ucode from running, wait for idle, reset */
-	nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
 	nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+	nvkm_rd32(device, 0x000200);
 	nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
 
 	/* upload data segment */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000);
 	for (i = 0; i < impl->data.size / 4; i++)
-		nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+		nvkm_wr32(device, 0x10a1c4, impl->data.data[i]);
 
 	/* upload code segment */
-	nv_wr32(pmu, 0x10a180, 0x01000000);
+	nvkm_wr32(device, 0x10a180, 0x01000000);
 	for (i = 0; i < impl->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(pmu, 0x10a188, i >> 6);
-		nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+			nvkm_wr32(device, 0x10a188, i >> 6);
+		nvkm_wr32(device, 0x10a184, impl->code.data[i]);
 	}
 
 	/* start it running */
-	nv_wr32(pmu, 0x10a10c, 0x00000000);
-	nv_wr32(pmu, 0x10a104, 0x00000000);
-	nv_wr32(pmu, 0x10a100, 0x00000002);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a104, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 
 	/* wait for valid host->pmu ring configuration */
 	if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
 		return -EBUSY;
-	pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
-	pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
 
 	/* wait for valid pmu->host ring configuration */
 	if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
 		return -EBUSY;
-	pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
-	pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
 
-	nv_wr32(pmu, 0x10a010, 0x000000e0);
+	nvkm_wr32(device, 0x10a010, 0x000000e0);
 	return 0;
 }
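Editor's note: the pattern throughout this series is that the plain MMIO accessors now take the nvkm_device that owns the register window (nvkm_rd32()/nvkm_wr32()/nvkm_mask()) instead of the PMU object (nv_rd32()/nv_wr32()/nv_mask()), with each converted function fetching `device` once from `pmu->subdev.device`. The sketch below is only a minimal user-space model of that shape; the `*_model` types, the fake register file, and the `rd32()`/`wr32()` helpers are illustrative stand-ins, not the nvkm implementation.

```c
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the nvkm object layout */
struct device_model {
	uint32_t regs[0x200000 / 4];	/* pretend 2 MiB MMIO window */
};

struct subdev_model {
	struct device_model *device;	/* every sub-device points at its device */
};

struct pmu_model {
	struct subdev_model subdev;	/* nvkm_pmu embeds an nvkm_subdev */
};

/* device-relative accessors, shaped like nvkm_rd32()/nvkm_wr32() */
static uint32_t rd32(struct device_model *device, uint32_t addr)
{
	return device->regs[addr / 4];
}

static void wr32(struct device_model *device, uint32_t addr, uint32_t data)
{
	device->regs[addr / 4] = data;
}

int main(void)
{
	static struct device_model dev;
	struct pmu_model pmu = { .subdev = { .device = &dev } };

	/* the converted functions fetch the device once, then use it everywhere */
	struct device_model *device = pmu.subdev.device;

	wr32(device, 0x10a4a0, 0x00000004);
	printf("0x10a4a0 = 0x%08x\n", rd32(device, 0x10a4a0));
	return 0;
}
```

Passing the device keeps the register helpers independent of any particular sub-device type, which is presumably why the local helpers in gk104.c below (magic_()/magic()) switch their parameter from struct nvkm_pmu * to struct nvkm_device *.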
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c  +32 −32

@@ -31,49 +31,49 @@
 #include <subdev/timer.h>
 
 static void
-magic_(struct nvkm_pmu *pmu, u32 ctrl, int size)
+magic_(struct nvkm_device *device, u32 ctrl, int size)
 {
-	nv_wr32(pmu, 0x00c800, 0x00000000);
-	nv_wr32(pmu, 0x00c808, 0x00000000);
-	nv_wr32(pmu, 0x00c800, ctrl);
-	if (nv_wait(pmu, 0x00c800, 0x40000000, 0x40000000)) {
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+	nvkm_wr32(device, 0x00c808, 0x00000000);
+	nvkm_wr32(device, 0x00c800, ctrl);
+	if (nv_wait(device, 0x00c800, 0x40000000, 0x40000000)) {
 		while (size--)
-			nv_wr32(pmu, 0x00c804, 0x00000000);
+			nvkm_wr32(device, 0x00c804, 0x00000000);
 	}
-	nv_wr32(pmu, 0x00c800, 0x00000000);
+	nvkm_wr32(device, 0x00c800, 0x00000000);
 }
 
 static void
-magic(struct nvkm_pmu *pmu, u32 ctrl)
+magic(struct nvkm_device *device, u32 ctrl)
 {
-	magic_(pmu, 0x8000a41f | ctrl, 6);
-	magic_(pmu, 0x80000421 | ctrl, 1);
+	magic_(device, 0x8000a41f | ctrl, 6);
+	magic_(device, 0x80000421 | ctrl, 1);
 }
 
 static void
 gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	struct nvkm_device *device = nv_device(pmu);
+	struct nvkm_device *device = pmu->subdev.device;
 
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+	nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 
 	if (nv_device_match(device, 0x11fc, 0x17aa, 0x2211) /* Lenovo W541 */
 	 || nv_device_match(device, 0x11fc, 0x17aa, 0x221e) /* Lenovo W541 */

@@ -81,18 +81,18 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 		nv_info(pmu, "hw bug workaround enabled\n");
 		switch (device->chipset) {
 		case 0xe4:
-			magic(pmu, 0x04000000);
-			magic(pmu, 0x06000000);
-			magic(pmu, 0x0c000000);
-			magic(pmu, 0x0e000000);
+			magic(device, 0x04000000);
+			magic(device, 0x06000000);
+			magic(device, 0x0c000000);
+			magic(device, 0x0e000000);
 			break;
 		case 0xe6:
-			magic(pmu, 0x02000000);
-			magic(pmu, 0x04000000);
-			magic(pmu, 0x0a000000);
+			magic(device, 0x02000000);
+			magic(device, 0x04000000);
+			magic(device, 0x0a000000);
 			break;
 		case 0xe7:
-			magic(pmu, 0x02000000);
+			magic(device, 0x02000000);
 			break;
 		default:
 			break;
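Editor's note: the PGOB workarounds here and in gk110.c below lean almost entirely on nvkm_mask(), i.e. a read-modify-write: read the register, clear the bits covered by the mask, OR in the new data, write it back. A self-contained sketch of that idea, assuming those semantics; fake_reg, rd32(), wr32() and mask32() are illustrative names, not the kernel helpers.

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for a single MMIO register */

static uint32_t rd32(uint32_t addr) { (void)addr; return fake_reg; }
static void     wr32(uint32_t addr, uint32_t data) { (void)addr; fake_reg = data; }

/* read-modify-write: clear the bits in 'mask', then OR in 'data' */
static uint32_t mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32(addr);

	wr32(addr, (temp & ~mask) | data);
	return temp;	/* previous value */
}

int main(void)
{
	fake_reg = 0xffffffff;
	mask32(0x000200, 0x00001000, 0x00000000);	/* clear bit 12 */
	mask32(0x000200, 0x08000000, 0x08000000);	/* set bit 27 (already set) */
	printf("reg = 0x%08x\n", fake_reg);		/* prints 0xffffefff */
	return 0;
}
```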
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c  +15 −14

@@ -31,6 +31,7 @@ void
 gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	static const struct {
 		u32 addr;
 		u32 data;

@@ -54,28 +55,28 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 	};
 	int i;
 
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+	nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
 	for (i = 0; i < ARRAY_SIZE(magic); i++) {
-		nv_wr32(pmu, magic[i].addr, magic[i].data);
+		nvkm_wr32(device, magic[i].addr, magic[i].data);
 		nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
 	}
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 }
 
 struct nvkm_oclass *
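Editor's note: several call sites in these files (nv_wait(), nv_wait_ne(), nv_device_match(), nv_info()/nv_error()) are left on the old helpers by this series; only the plain read/write/mask accessors are converted. For reference, nv_wait() polls a register until the masked value equals the expected data, giving up after a timeout. A rough stand-alone model of that polling loop, with a fixed iteration budget standing in for the real timer-based timeout (the names and the bound are assumptions, not the kernel code):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;	/* stands in for a status register */

static uint32_t rd32(uint32_t addr) { (void)addr; return fake_status; }

/* poll until (reg & mask) == data, or the iteration budget runs out */
static bool wait_eq(uint32_t addr, uint32_t mask, uint32_t data)
{
	for (long i = 0; i < 1000000; i++) {
		if ((rd32(addr) & mask) == data)
			return true;
	}
	return false;	/* timed out */
}

int main(void)
{
	fake_status = 0x00000000;	/* pretend the unit is already idle */
	printf("idle: %s\n",
	       wait_eq(0x10a04c, 0xffffffff, 0x00000000) ? "yes" : "no");
	return 0;
}
```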
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c  +10 −7

@@ -98,16 +98,18 @@ static int
 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	status->busy = nv_rd32(pmu, 0x10a508 + (BUSY_SLOT * 0x10));
-	status->total= nv_rd32(pmu, 0x10a508 + (CLK_SLOT * 0x10));
+	struct nvkm_device *device = pmu->base.subdev.device;
+	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
+	status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
 	return 0;
 }
 
 static void
 gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	nv_wr32(pmu, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
-	nv_wr32(pmu, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+	struct nvkm_device *device = pmu->base.subdev.device;
+	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
 }
 
 static void

@@ -173,6 +175,7 @@ static int
 gk20a_pmu_init(struct nvkm_object *object)
 {
 	struct gk20a_pmu *pmu = (void *)object;
+	struct nvkm_device *device = pmu->base.subdev.device;
 	int ret;
 
 	ret = nvkm_subdev_init(&pmu->base.subdev);

@@ -182,9 +185,9 @@ gk20a_pmu_init(struct nvkm_object *object)
 	pmu->base.pgob = nvkm_pmu_pgob;
 
 	/* init pwr perf counter */
-	nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
-	nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
-	nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
 
 	nvkm_timer_alarm(pmu, 2000000000, &pmu->alarm);
 	return ret;

drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c  +3 −2

@@ -28,8 +28,9 @@ static int
 gt215_pmu_init(struct nvkm_object *object)
 {
 	struct nvkm_pmu *pmu = (void *)object;
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
+	struct nvkm_device *device = pmu->subdev.device;
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
 	return nvkm_pmu_init(pmu);
 }
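Editor's note on how the device pointer is reached: base.c stops casting the subdev pointer to a PMU and instead recovers the container with container_of(subdev, typeof(*pmu), subdev), while gk20a.c walks through its embedded base structure (pmu->base.subdev.device). The embedded-struct pattern behind both is sketched below in stand-alone form; the *_model types and fields are hypothetical, and container_of is reduced to the plain offsetof form without the kernel's type checking.

```c
#include <stddef.h>
#include <stdio.h>

/* minimal container_of(): step back from a member pointer to its container */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev_model {
	int index;
};

struct pmu_model {
	struct subdev_model subdev;	/* embedded, not pointed-to */
	unsigned int send_base;
};

/* a callback that only receives the embedded subdev, like nvkm_pmu_intr() */
static void intr(struct subdev_model *subdev)
{
	struct pmu_model *pmu = container_of(subdev, struct pmu_model, subdev);

	printf("send.base = 0x%04x\n", pmu->send_base);
}

int main(void)
{
	struct pmu_model pmu = { .subdev = { .index = 0 }, .send_base = 0x3f0 };

	intr(&pmu.subdev);
	return 0;
}
```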