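// GPU.cpp -- software-rasterized PSX GPU core: GP0/GP1 command decoding,
// VRAM transfers, and display timing. u16/u32, bit_field, and the
// GpuVertex / Triangle / raster<> machinery come from the included headers.
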
#include <algorithm>
#include <cstdarg>
#include <cstdio>
#include <cstring>

#include "GPU.h"
#include "PSX.h"
#include "Raster.h"

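// Put every piece of GPU state back to its power-on value.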
void GPU::reset(PSX* psx) {
    _psx = psx;
    vram.fill(RGB_555(0x3, 0, 0x3));
    frame = 0;
    _rect_tex_flip_x = false;
    _rect_tex_flip_y = false;
    _vblank_interrupt = false;
    _vram_to_cpu_in_progress = false;
    _gp0_get_parameters = false;
    _gp0_vram_upload = false;
    _gp0_param_count = 0;
    _gp0_buffer.fill(0);
    _gpu_read_value = 0;
    _gp0_buff_idx = 0;

    _display_area_vram_x = 0;
    _display_area_vram_y = 0;
    _display_area_horiz_start = 0;
    _display_area_horiz_end = 0;
    _display_area_vert_start = 0;
    _display_area_vert_end = 0;

    _tex_win_mask_x = 0;
    _tex_win_mask_y = 0;
    _tex_win_off_x = 0;
    _tex_win_off_y = 0;

    _draw_area_top = 0;
    _draw_area_left = 0;
    _draw_area_bottom = 0;
    _draw_area_right = 0;

    _draw_offset_x = 0;
    _draw_offset_y = 0;

    _line_tick = 0;
    _line = 0;
    _clock_frac = 0;
    stat.reset();
    vram_upload_state.reset();
    vram_download_state.reset();
    _current_gp0_cmd = 0;
    _currentCLUT = 0;
    _tri_count = 0;
}

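// Serialize the full GPU state; the add() order presumably must match the
// corresponding restore path.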
void GPU::do_save_state(SaveState &state) {
    state.add(vram);
    state.add(frame);
    state.add(_rect_tex_flip_x);
    state.add(_rect_tex_flip_y);
    state.add(_vblank_interrupt);
    state.add(_vram_to_cpu_in_progress);
    state.add(_gp0_get_parameters);
    state.add(_gp0_vram_upload);
    state.add(_gp0_param_count);
    state.add(_gp0_buffer);
    state.add(_gpu_read_value);
    state.add(_gp0_buff_idx);

    state.add(_display_area_vram_x);
    state.add(_display_area_vram_y);
    state.add(_display_area_horiz_start);
    state.add(_display_area_horiz_end);
    state.add(_display_area_vert_start);
    state.add(_display_area_vert_end);

    state.add(_tex_win_mask_x);
    state.add(_tex_win_mask_y);
    state.add(_tex_win_off_x);
    state.add(_tex_win_off_y);

    state.add(_draw_area_top);
    state.add(_draw_area_left);
    state.add(_draw_area_bottom);
    state.add(_draw_area_right);

    state.add(_draw_offset_x);
    state.add(_draw_offset_y);

    state.add(_line_tick);
    state.add(_line);
    state.add(_clock_frac);
    state.add(stat);
    state.add(vram_upload_state);
    state.add(vram_download_state);
    state.add(_current_gp0_cmd);
    state.add(_tri_count);
    state.add(_currentCLUT);
}

void GPU::panic(const char *fmt, ...) {
    char* buff = _psx->panic_message_buffer;
    sprintf(buff, "[GPU] Panic: ");
    buff += strlen(buff);

    // Append the formatted message after the prefix.
    va_list myargs;
    va_start(myargs, fmt);
    vsprintf(buff, fmt, myargs);
    va_end(myargs);
    buff += strlen(buff);

    // Dump the current command and any parameters buffered so far.
    sprintf(buff, "\n\ncurrent cmd: 0x%x\n", _current_gp0_cmd);
    for(u32 i = 0; i < std::min(_gp0_param_count, _gp0_buff_idx); i++) {
        buff += strlen(buff);
        sprintf(buff, " [%02u] 0x%08x\n", i, _gp0_buffer[i]);
    }

    _psx->panic();
}

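// Compose the GPUSTAT register value (read at 0x1F801814). Several of the
// ready/request bits are hard-wired true for now rather than tracked exactly;
// see the todos below.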
u32 GPU::get_stat() {
    stat.interlace_field = true; // todo, why do I need this?

    switch(stat.dma_direction) {
        case 0:
            stat.data_request = false;
            break;
        case 1:
        case 2:
            stat.data_request = true;
            break;
        case 3:
            stat.data_request = true; // todo, only set this if command has been sent
            break;
        default:
            panic("unknown dma direction %d", stat.dma_direction);
            break;
    }

    stat.ready_for_cmd = true;
    stat.ready_for_vram_download = true; // todo, don't always set
    stat.ready_for_dma = true;

    if(_line < 242) { // todo, is this actually in blank?
        if(stat.v_res && stat.interlace_enable) {
            stat.even_odd = (frame & 1);
        } else {
            stat.even_odd = (_line & 1);
        }
    } else {
        stat.even_odd = false;
    }

    return stat.data;
}

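// Advance display timing by `cyc` core cycles, scaled into GPU video ticks by
// the 7/11 fixed-point ratio below. The constants assume NTSC-style video:
// 3412 ticks per scanline and 263 scanlines per field.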
void GPU::step(u32 cyc) {
    // Accumulate cycles in 16.16 fixed point, carrying the fractional part of
    // the clock conversion across calls in _clock_frac.
    u32 frac_clock_ratio = (7.f / 11.f) * 0x10000;
    u32 frac_cycles = _clock_frac + cyc * frac_clock_ratio;
    _clock_frac = frac_cycles & 0xffff;

    _line_tick += (frac_cycles >> 16);

    u32 oldLine = _line;
    _line += _line_tick / 3412; // 3412 ticks per scanline

    if(_line != oldLine) {
        _psx->timer.h_blank();
    }

    _line_tick = (_line_tick % 3412);

    if(_line > 263) {
        // A field just finished: update the interlace field and count the frame.
        if(stat.interlace_enable) {
            stat.interlace_field = ((_line / 263) & 1);
        }
        _line = (_line % 263);
        frame++;
    }

    bool inBlank = _line >= 242; // todo, is this actually true?
    // Raise VBLANK only on the rising edge of the blanking period.
    if(!_vblank_interrupt && inBlank) {
        _psx->cpu.interrupt(InterruptKind::VBLANK);
    }

    _vblank_interrupt = inBlank;
}

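// Number of 32-bit words each render command consumes, including the command
// word itself, indexed by (cmd - 0x20) for polygons and (cmd - 0x60) for
// rectangles. A zero marks an encoding this implementation treats as invalid.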
constexpr u32 gp0_polygon_parameter_count[0x20] = {
    4, 0, 4, 0, 7, 7, 7, 7, 5, 0, 5, 0, 9, 9, 9, 9,
    6, 0, 6, 0, 9, 0, 9, 0, 8, 0, 8, 0, 12, 0, 12, 0
};

constexpr u32 gp0_rectangle_parameter_count[0x20] = {
    3, 0, 3, 0, 4, 4, 4, 4, 2, 0, 2, 0, 0, 0, 0, 0,
    2, 0, 2, 3, 3, 3, 3, 2, 2, 0, 2, 0, 3, 3, 3, 3
};

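// GP0 word handler (writes to 0x1F801810). Acts as a small state machine: an
// incoming word is either VRAM-upload pixel data, a parameter for a previously
// latched command, or a fresh command word whose top byte selects the operation.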
void GPU::GP0(u32 cmd) {
    if(_gp0_vram_upload) {
        // Transfer in progress: each incoming word carries two 16-bit pixels.
        vram_upload_state.remain--;
        u32 addr = 1024 * vram_upload_state.yc + vram_upload_state.xc;

        u16 px1 = cmd;
        u16 px2 = cmd >> 16;

        vram[addr & 0x7ffff] = px1; // mask keeps a stray destination inside 1024x512 VRAM
        vram_upload_state.xc++;
        if(vram_upload_state.xc >= vram_upload_state.xl) {
            vram_upload_state.xc = vram_upload_state.x0;
            vram_upload_state.yc++;
        }

        addr = 1024 * vram_upload_state.yc + vram_upload_state.xc;
        vram[addr & 0x7ffff] = px2;
        vram_upload_state.xc++;
        if(vram_upload_state.xc >= vram_upload_state.xl) {
            vram_upload_state.xc = vram_upload_state.x0;
            vram_upload_state.yc++;
        }

        if(!vram_upload_state.remain) {
            _gp0_vram_upload = false;
            if(_gp0_get_parameters) {
                panic("leaving vram upload with pending parameter upload");
            }
        }
    } else if(_gp0_get_parameters) {
        _gp0_buffer[_gp0_buff_idx++] = cmd;

        if(_gp0_buff_idx == _gp0_param_count) {
            // All parameters collected; dispatch the latched command.
            if(_current_gp0_cmd >= 0x20 && _current_gp0_cmd < 0x40) {
                // done with polygon!
                gp0_render_poly(_gp0_buffer.data(), _current_gp0_cmd);
                _gp0_get_parameters = false;
            } else if(_current_gp0_cmd >= 0x60 && _current_gp0_cmd < 0x80) {
                // done with rectangle
                gp0_render_rect(_gp0_buffer.data(), _current_gp0_cmd);
                _gp0_get_parameters = false;
            } else if(_current_gp0_cmd == 0xa0) {
                // begin VRAM upload!
                gp0_a0_cpu_to_vram(_gp0_buffer.data());
                _gp0_get_parameters = false;
            } else if(_current_gp0_cmd == 0xc0) {
                // begin VRAM download
                gp0_c0_vram_to_cpu(_gp0_buffer.data());
                _gp0_get_parameters = false;
            } else if(_current_gp0_cmd == 2) {
                gp0_02_fill_rectangle(_gp0_buffer.data());
                _gp0_get_parameters = false;
            } else if(_current_gp0_cmd == 0x80) {
                gp0_80_copy_rectangle(_gp0_buffer.data());
                _gp0_get_parameters = false;
            } else {
                panic("An unknown GP0 command has finished executing! 0x%x", _current_gp0_cmd);
            }
        }
    } else {
        // Fresh command word: the top byte selects the operation.
        _current_gp0_cmd = (cmd >> 24) & 0xff;

        if(_current_gp0_cmd >= 0x20 && _current_gp0_cmd < 0x40) {
            // Polygon: latch the command word and collect the rest.
            _gp0_get_parameters = true;
            u32 param_cnt = gp0_polygon_parameter_count[_current_gp0_cmd - 0x20];
            if(!param_cnt) {
                panic("GP0 command of 0x%x (cmd 0x%08x) is an invalid polygon command", _current_gp0_cmd, cmd);
            }
            _gp0_buffer[0] = cmd;
            _gp0_param_count = param_cnt; // always > 1; dispatch happens when a later word arrives
            _gp0_buff_idx = 1;
        } else if(_current_gp0_cmd >= 0x60 && _current_gp0_cmd < 0x80) {
            // Rectangle: same latching scheme as polygons.
            _gp0_get_parameters = true;
            u32 param_cnt = gp0_rectangle_parameter_count[_current_gp0_cmd - 0x60];
            if(!param_cnt) {
                panic("GP0 command of 0x%x (cmd 0x%08x) is an invalid rectangle command", _current_gp0_cmd, cmd);
            }
            _gp0_buffer[0] = cmd;
            _gp0_param_count = param_cnt;
            _gp0_buff_idx = 1;
        } else if(_current_gp0_cmd >= 0xe1 && _current_gp0_cmd <= 0xe6) {
            gp0_attribute_cmd(cmd, _current_gp0_cmd);
        } else if(_current_gp0_cmd == 0 || _current_gp0_cmd == 1) {
            // GP0(00h) nop / GP0(01h) clear cache: do nothing
        } else if(_current_gp0_cmd == 2) {
            // Fill rectangle: the color word plus two coordinate/size words.
            _gp0_get_parameters = true;
            _gp0_buffer[0] = cmd;
            _gp0_param_count = 3;
            _gp0_buff_idx = 1;
        } else if(_current_gp0_cmd == 0xa0) {
            // CPU to VRAM upload
            _gp0_get_parameters = true;
            _gp0_param_count = 2;
            _gp0_buff_idx = 0;
        } else if(_current_gp0_cmd == 0xc0) {
            // VRAM to CPU
            _gp0_get_parameters = true;
            _gp0_param_count = 2;
            _gp0_buff_idx = 0;
        } else if(_current_gp0_cmd == 0x80) {
            // VRAM to VRAM copy
            _gp0_get_parameters = true;
            _gp0_param_count = 3;
            _gp0_buff_idx = 0;
        } else {
            panic("unknown GP0 command %d (0x%x)", _current_gp0_cmd, _current_gp0_cmd);
        }
    }
}

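// GP0(E1h..E6h) attribute commands: draw mode, texture window, drawing area
// corners, drawing offset, and mask bit control.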
void GPU::gp0_attribute_cmd(u32 cmd, u8 id) {
    switch(id) {
        case 0xe1: // draw mode
            stat.tex_page_x = cmd & 0xf;
            stat.tex_page_y = ((cmd >> 4) & 1);
            stat.semi_transparency_mode = ((cmd >> 5) & 3);
            stat.tex_page_color_mode = (cmd >> 7) & 3;
            stat.dither = ((cmd >> 9) & 1);
            stat.draw_to_display_area = ((cmd >> 10) & 1);
            stat.tex_disable = ((cmd >> 11) & 1);
            _rect_tex_flip_x = ((cmd >> 12) & 1);
            _rect_tex_flip_y = ((cmd >> 13) & 1);
            break;

        case 0xe2: // texture window
            _tex_win_mask_x = cmd & 0x1f;
            _tex_win_mask_y = (cmd >> 5) & 0x1f;
            _tex_win_off_x = ((cmd >> 10) & 0x1f);
            _tex_win_off_y = ((cmd >> 15) & 0x1f);
            break;

        case 0xe3: // draw area top left
            _draw_area_top = (cmd >> 10) & 0x3ff;
            _draw_area_left = (cmd & 0x3ff);
            break;

        case 0xe4: // draw area bottom right
            _draw_area_bottom = (cmd >> 10) & 0x3ff;
            _draw_area_right = (cmd & 0x3ff);
            break;

        case 0xe5: // draw offset (must be sign extended!)
        {
            u16 x = (cmd & 0x7ff);
            u16 y = (cmd >> 11) & 0x7ff;
            // Shift the 11-bit fields up and back down to sign extend them.
            _draw_offset_x = ((s16)(x << 5)) >> 5;
            _draw_offset_y = ((s16)(y << 5)) >> 5;
        }
        break;

        case 0xe6: // mask bit setting
            stat.set_mask_bit = cmd & 1;
            stat.use_mask_bit = cmd & 2;
            break;

        default:
            panic("unknown GP0 attribute command %d (0x%x)", id, id);
    }
}

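// Render a polygon command (0x20..0x3F). The low bits of the id encode the
// variant: bit 0 = raw (unmodulated) texture, bit 1 = semi-transparent,
// bit 2 = textured, bit 3 = quad rather than triangle, bit 4 = gouraud shaded.
// Quads are drawn as two triangles that share an edge.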
void GPU::gp0_render_poly(u32 *cmd, u8 id) {
    bool raw_tex = (id & (1 << 0));
    bool semi_trans = id & (1 << 1);
    bool texture = id & (1 << 2);
    bool quad = id & (1 << 3);
    bool shade = id & (1 << 4);

    raster_dither = shade || (texture && !raw_tex);

    if(!raw_tex && !shade && !texture) {
        // group 1 polys: flat-shaded, untextured
        GpuColorAttr rgb = cmd[0];
        raster_rgb[0] = rgb.r();
        raster_rgb[1] = rgb.g();
        raster_rgb[2] = rgb.b();
        GpuVertexAttr verts[4] = {cmd[1], cmd[2], cmd[3], cmd[4]};

        Triangle<0> tri;
        for(u32 i = 0; i < 3; i++) {
            tri.verts[i] = GpuVertex<0>({verts[i].x(), verts[i].y()});
        }

        raster<0, false, false>(tri, semi_trans, raw_tex);
        if(quad) {
            // Second half of the quad: swap in the fourth vertex and draw again.
            tri.verts[0] = GpuVertex<0>({verts[3].x(), verts[3].y()});
            raster<0, false, false>(tri, semi_trans, raw_tex);
        }
    } else if(texture && !shade) {
        // group 2 polys: textured, flat-shaded
        GpuColorAttr rgb = cmd[0];
        raster_rgb[0] = rgb.r();
        raster_rgb[1] = rgb.g();
        raster_rgb[2] = rgb.b();
        GpuVertexAttr verts[4] = {cmd[1], cmd[3], cmd[5], cmd[7]};
        // The CLUT and texpage attributes ride in the high halves of the
        // first two texcoord words.
        GpuCLUTAttr clut(cmd[2]);
        GpuTexpageAttr tpage(cmd[4]);
        GpuTexcoordAttr tc[4] = {cmd[2], cmd[4], cmd[6], cmd[8]};

        _currentCLUT = clut.addr();
        stat.tex_page_x = tpage.x();
        stat.tex_page_y = tpage.y();
        stat.semi_transparency_mode = tpage.transparency();
        stat.tex_page_color_mode = tpage.color_depth();
        stat.tex_disable = tpage.tex_disable();

        Triangle<2> tri;
        for(u32 i = 0; i < 3; i++) {
            tri.verts[i] = GpuVertex<2>({verts[i].x(), verts[i].y(), tc[i].u(), tc[i].v()});
        }

        raster<2, false, true>(tri, semi_trans, raw_tex);
        if(quad) {
            tri.verts[0] = GpuVertex<2>({verts[3].x(), verts[3].y(), tc[3].u(), tc[3].v()});
            raster<2, false, true>(tri, semi_trans, raw_tex);
        }
    } else if(!raw_tex && !texture && shade) {
        // group 3 polys: gouraud-shaded, untextured
        GpuColorAttr rgb[4] = {cmd[0], cmd[2], cmd[4], cmd[6]};
        GpuVertexAttr verts[4] = {cmd[1], cmd[3], cmd[5], cmd[7]};

        Triangle<3> tri;
        for(u32 i = 0; i < 3; i++) {
            tri.verts[i] = GpuVertex<3>({verts[i].x(), verts[i].y(), rgb[i].r(), rgb[i].g(), rgb[i].b()});
        }

        raster<3, true, false>(tri, semi_trans, raw_tex);
        if(quad) {
            tri.verts[0] = GpuVertex<3>({verts[3].x(), verts[3].y(), rgb[3].r(), rgb[3].g(), rgb[3].b()});
            raster<3, true, false>(tri, semi_trans, raw_tex);
        }
    } else if(!raw_tex && texture && shade) {
        // gouraud-shaded and textured
        GpuColorAttr rgb[4] = {cmd[0], cmd[3], cmd[6], cmd[9]};
        GpuVertexAttr verts[4] = {cmd[1], cmd[4], cmd[7], cmd[10]};
        GpuTexcoordAttr tc[4] = {cmd[2], cmd[5], cmd[8], cmd[11]};

        GpuCLUTAttr clut(cmd[2]);
        GpuTexpageAttr tpage(cmd[5]);

        _currentCLUT = clut.addr();
        stat.tex_page_x = tpage.x();
        stat.tex_page_y = tpage.y();
        stat.semi_transparency_mode = tpage.transparency();
        stat.tex_page_color_mode = tpage.color_depth();
        stat.tex_disable = tpage.tex_disable();

        Triangle<5> tri;
        for(u32 i = 0; i < 3; i++) {
            tri.verts[i] = GpuVertex<5>({verts[i].x(), verts[i].y(), rgb[i].r(), rgb[i].g(), rgb[i].b(), tc[i].u(), tc[i].v()});
        }

        raster<5, true, true>(tri, semi_trans, raw_tex);
        if(quad) {
            tri.verts[0] = GpuVertex<5>({verts[3].x(), verts[3].y(), rgb[3].r(), rgb[3].g(), rgb[3].b(), tc[3].u(), tc[3].v()});
            raster<5, true, true>(tri, semi_trans, raw_tex);
        }
    } else {
        panic("don't know how to render polygon 0x%x", id);
    }
}

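// Render a rectangle command (0x60..0x7F). Bits 3-4 of the id select the size
// (0 = variable, 1 = 1x1, 2 = 8x8, 3 = 16x16); only textured rectangles with
// variable or 16x16 sizes are handled so far.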
void GPU::gp0_render_rect(u32 *cmd, u8 id) {
    // todo dither?
    raster_dither = false;
    bool raw_tex = (id & (1 << 0));
    bool semi_trans = id & (1 << 1);
    bool texture = id & (1 << 2);
    u8 size_type = (id >> 3) & 3;

    if(texture) {
        GpuColorAttr rgb(cmd[0]);
        GpuVertexAttr corner(cmd[1]);
        // The texcoord and CLUT attributes share the same parameter word.
        GpuTexcoordAttr tex(cmd[2]);
        GpuCLUTAttr clut(cmd[2]);

        _currentCLUT = clut.addr();

        // todo, only load if we need colors?
        raster_rgb[0] = rgb.r();
        raster_rgb[1] = rgb.g();
        raster_rgb[2] = rgb.b();

        u16 x_size;
        u16 y_size;

        switch(size_type) {
            case 0: // variable size, taken from the last parameter word
                x_size = cmd[3] & 0xffff;
                y_size = cmd[3] >> 16;
                break;
            case 3: // fixed 16x16
                x_size = 16;
                y_size = 16;
                break;
            default:
                panic("unknown size of textured rectangle %d (cmd 0x%x)", size_type, _current_gp0_cmd);
                return;
        }

        if(x_size >= 1024 || y_size >= 512) {
            panic("rectangle of size %d %d is not supported", x_size, y_size);
        }

        // Rasterize the rectangle as two right triangles.
        Triangle<2> tri;
        tri.verts[0] = GpuVertex<2>({corner.x(), corner.y(), tex.u(), tex.v()});
        tri.verts[1] = GpuVertex<2>({corner.x() + x_size, corner.y(), tex.u() + x_size, tex.v()});
        tri.verts[2] = GpuVertex<2>({corner.x(), corner.y() + y_size, tex.u(), tex.v() + y_size});

        raster<2, false, true>(tri, semi_trans, raw_tex);
        tri.verts[0] = GpuVertex<2>({corner.x() + x_size, corner.y() + y_size, tex.u() + x_size, tex.v() + y_size});
        raster<2, false, true>(tri, semi_trans, raw_tex);
    } else {
        panic("untextured rectangle is not supported");
    }
}

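// GP0(02h): fill a rectangle in VRAM with a solid color. The 24-bit command
// color is truncated to 5 bits per channel, and the fill width is rounded up
// to a multiple of 16 pixels.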
void GPU::gp0_02_fill_rectangle(u32 *cmd) {
    // Convert the 24-bit command color down to 5 bits per channel.
    RGB_888 rgb = cmd[0];
    u8 r = rgb.r >> 3;
    u8 g = rgb.g >> 3;
    u8 b = rgb.b >> 3;

    u32 x_start = (cmd[1] & 0xffff); // in units of 16-bit pixels
    u32 y_start = (cmd[1] >> 16);
    u32 x_size = (cmd[2] & 0xffff);
    u32 y_size = (cmd[2] >> 16);

    x_start = (x_start & 0x3ff);
    y_start = (y_start & 0x1ff);

    // Width is rounded up to a multiple of 16 pixels.
    x_size = ((x_size & 0x3ff) + 0xf) & (~0xf);
    y_size = y_size & 0x1ff;

    for(u32 y = y_start; y < y_start + y_size; y++) {
        for(u32 x = x_start; x < x_start + x_size; x++) {
            if(x >= 1024 || y >= 512) {
                panic("invalid fill rectangle @ %d %d\n", x, y);
            }

            // Pack 5:5:5 with red in the low bits.
            vram[(y * 1024) + x] = r + (((u32)g) << 5) + (((u32)b) << 10);
        }
    }
}

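// GP0(80h): VRAM-to-VRAM rectangle copy, performed top-to-bottom and
// left-to-right one pixel at a time (overlapping regions are not special-cased).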
void GPU::gp0_80_copy_rectangle(u32 *cmd) {
    u32 src_x = (cmd[0] & 0xffff);
    u32 src_y = (cmd[0] >> 16);
    u32 dst_x = (cmd[1] & 0xffff);
    u32 dst_y = (cmd[1] >> 16);

    u32 x_size = (cmd[2] & 0xffff);
    u32 y_size = (cmd[2] >> 16);

    // todo check these, and maybe mask starts too
    x_size = ((x_size & 0x3ff) + 0xf) & (~0xf);
    y_size = y_size & 0x1ff;

    for(u32 y = 0; y < y_size; y++) {
        for(u32 x = 0; x < x_size; x++) {
            // Wrap the coordinates so a bad copy cannot index outside VRAM.
            u32 src = ((y + src_y) & 0x1ff) * 1024 + ((x + src_x) & 0x3ff);
            u32 dst = ((y + dst_y) & 0x1ff) * 1024 + ((x + dst_x) & 0x3ff);
            vram[dst] = vram[src];
        }
    }
}

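// GP0(A0h): begin a CPU-to-VRAM transfer. Latches the destination window and
// word count; the pixel data itself arrives through subsequent GP0 writes,
// two 16-bit pixels per word (see the top of GP0()).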
void GPU::gp0_a0_cpu_to_vram(u32 *cmd) {
    u32 size = cmd[1];
    u32 width = size & 0xffff;
    u32 height = size >> 16;
    u32 imgPixels = width * height;

    u32 destX = cmd[0] & 0xffff;
    u32 destY = (cmd[0] >> 16);

    vram_upload_state.x0 = destX;
    vram_upload_state.y0 = destY;
    vram_upload_state.xc = destX;
    vram_upload_state.yc = destY;
    vram_upload_state.xl = destX + width;

    // Round up to an even pixel count: uploads arrive two pixels per word.
    imgPixels = (imgPixels + 1) & (~1);
    vram_upload_state.remain = imgPixels / 2;
    _gp0_vram_upload = true;
}

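// GP0(C0h): begin a VRAM-to-CPU transfer. The CPU drains the window through
// GPUREAD reads, two pixels per word.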
void GPU::gp0_c0_vram_to_cpu(u32 *cmd) {
    u32 size = cmd[1];
    u32 width = size & 0xffff;
    u32 height = size >> 16;
    u32 imgPixels = width * height;

    u32 srcX = cmd[0] & 0xffff; // source window origin in VRAM
    u32 srcY = (cmd[0] >> 16);

    vram_download_state.x0 = srcX;
    vram_download_state.y0 = srcY;
    vram_download_state.xc = srcX;
    vram_download_state.yc = srcY;
    vram_download_state.xl = srcX + width;

    imgPixels = (imgPixels + 1) & (~1);
    vram_download_state.remain = imgPixels / 2;
    _vram_to_cpu_in_progress = true;
}

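// GP1 command dispatcher (writes to 0x1F801814); the top byte selects the
// operation. GP1(10h), the GPU-info read, is not implemented yet.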
void GPU::GP1(u32 cmd) {
    u32 op = (cmd >> 24) & 0xff;

    switch(op) {
        case 0: gp1_00_reset_gpu(); break;
        case 1: gp1_01_reset_cmd_buffer(); break;
        case 2: gp1_02_ack_irq(); break;
        case 3: gp1_03_display_enable(cmd); break;
        case 4: gp1_04_dma_direction(cmd); break;
        case 5: gp1_05_display_area_start(cmd); break;
        case 6: gp1_06_horiz_display_range(cmd); break;
        case 7: gp1_07_vert_display_range(cmd); break;
        case 8: gp1_08_display_mode(cmd); break;
        case 0x10: // todo, this (get GPU info)
            break;
        default:
            panic("unknown GP1 command %d (0x%x)", op, op);
    }
}

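// GPUREAD port: while a VRAM-to-CPU transfer is active each read returns the
// next two pixels of the download window; otherwise the latched read value.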
u32 GPU::GPUREAD() {
    if(_vram_to_cpu_in_progress) {
        vram_download_state.remain--;
        u32 addr = 1024 * vram_download_state.yc + vram_download_state.xc;
        // Pack the next two pixels into one word (low pixel first). Note that
        // a row wrap falling between the two pixels is not handled here.
        u32 result = vram[addr & 0x7ffff] | (((u32)vram[(addr + 1) & 0x7ffff]) << 16);

        vram_download_state.xc += 2;
        if(vram_download_state.xc >= vram_download_state.xl) {
            vram_download_state.xc = vram_download_state.x0;
            vram_download_state.yc++;
        }

        if(!vram_download_state.remain) {
            _vram_to_cpu_in_progress = false;
            printf("[GPU] download complete!\n");
        }

        return result;
    } else {
        return _gpu_read_value;
    }
}

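// GP1(00h): reset the GPU by replaying the individual setup commands with
// their (presumed) power-on defaults, then zeroing the E1..E6 attributes.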
void GPU::gp1_00_reset_gpu() {
    gp1_01_reset_cmd_buffer();
    gp1_02_ack_irq();
    gp1_03_display_enable(1);
    gp1_04_dma_direction(GPU_DIR_OFF);
    gp1_05_display_area_start(0);
    // GP1(06h)/GP1(07h) display range defaults:
    _display_area_horiz_start = 0x200;
    _display_area_horiz_end = 0xc00;
    _display_area_vert_start = 0x10;
    _display_area_vert_end = 0x100;
    gp1_08_display_mode(0);

    // Zero out all of the E1..E6 drawing attributes.
    for(u32 i = 0; i < 6; i++) {
        gp0_attribute_cmd(0, 0xe1 + i);
    }
}

void GPU::gp1_01_reset_cmd_buffer() {
    _gp0_get_parameters = false;
    _gp0_param_count = 0;
}

void GPU::gp1_02_ack_irq() {
    stat.irq = false;
}

void GPU::gp1_03_display_enable(u32 cmd) {
    stat.display_disable = cmd & 1;
}

void GPU::gp1_04_dma_direction(u32 cmd) {
    stat.dma_direction = cmd & 3;
    if(stat.dma_direction == GPU_DIR_BAD) {
        panic("set bad dma direction");
    }
}

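// GP1(05h): set the VRAM coordinates of the top-left displayed pixel
// (10-bit X, 9-bit Y), decoded via a bit_field overlay.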
void GPU::gp1_05_display_area_start(u32 cmd) {
    union r {
        bit_field<u32, u32, 0, 10> x;
        bit_field<u32, u32, 10, 9> y;
        u32 data;
    };

    r reg;
    reg.data = cmd;
    _display_area_vram_x = reg.x & (~1); // clear lowest bit.
    _display_area_vram_y = reg.y;
}

void GPU::gp1_06_horiz_display_range(u32 cmd) {
    _display_area_horiz_start = cmd & 0xfff;
    _display_area_horiz_end = ((cmd >> 12) & 0xfff);
}

void GPU::gp1_07_vert_display_range(u32 cmd) {
    _display_area_vert_start = (cmd & 0x3ff);
    _display_area_vert_end = ((cmd >> 10) & 0x3ff);
}

void GPU::gp1_08_display_mode(u32 cmd) {
    stat.h_res_1 = cmd & 3;
    stat.v_res = (cmd >> 2) & 1;
    stat.pal_mode = (cmd >> 3) & 1;
    stat.display_area_color_depth = (cmd >> 4) & 1;
    stat.interlace_enable = (cmd >> 5) & 1;
    stat.h_res_2 = (cmd >> 6) & 1;
    stat.reverse_flag = (cmd >> 7) & 1;
    if(stat.reverse_flag) {
        panic("reverse flag set");
    }
}