Advertisement
Guest User

Untitled

a guest
Sep 12th, 2011
127
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 15.55 KB | None | 0 0
  1. see http://www.panda3d.org/forums/viewtopic.php?p=76004#76004
  2.  
  3.  
  4. ======================================================
  5.  
  6. # Author: Phil Light and Josh Yelon
  7. # Last Updated: 6/30/2007
  8. #
  9. # This tutorial shows how to determine what objects the mouse is pointing to,
  10. # while taking transparency into account, in a 2D environment.
  11.  
  12. import math, time, random
  13. from direct.showbase.DirectObject import DirectObject
  14. #from direct.interval.IntervalGlobal import *
  15.  
  16. from pandac.PandaModules import Point2
  17.  
  18. from pandac.PandaModules import ConfigVariableBool
  19. from pandac.PandaModules import OrthographicLens
  20. from pandac.PandaModules import GraphicsOutput
  21. from pandac.PandaModules import CardMaker
  22. from pandac.PandaModules import PandaNode
  23. from pandac.PandaModules import NodePath
  24. from pandac.PandaModules import PNMImage
  25. from pandac.PandaModules import Texture
  26. from pandac.PandaModules import Shader
  27. from pandac.PandaModules import Vec4
  28. from direct.actor.Actor import Actor
  29. from direct.task.Task import Task
  30.  
class TextureBasedPicker( DirectObject ):
    """
    Pick the object under the mouse in a 2D (sprite) scene while honoring
    texture transparency.

    The problem with normal ray-based collision detection (see the Tutorial
    on Picking) is that it works on geometry and disregards textures.  When
    the geometry is cards carrying partially-transparent textures (sprites),
    a click on the invisible corner of the front card registers on the front
    card even though there is 'nothing' visible there.  The intent of this
    class is to correctly pass such a click through to the object behind.

    The basic idea: a shader overrides all the colors in a texture, forcing
    each pickable object to render either in a single color we assign to it,
    or transparent.  This forced-color rendering happens in a (normally
    1x1-pixel) offscreen buffer whose camera tracks the mouse.  When the
    mouse is clicked we read back the rendered pixel and return the NodePath
    which we've mapped to that color.

    This class has been implemented for Orthographic lenses only.  Other
    types of camera lens should probably be able to use the same technique,
    but (i think) the motion of the camera (to track the mouse) would be
    slightly more complicated.  At any rate, there are a few places where
    the code assumes that the main window uses an Orthographic lens.
    """

    def __init__( self, rootNP, theCam, sceneLens ):
        """
        rootNP    -- scene root to parent the picking camera under
                     (typically ``render``).
        theCam    -- the main scene camera; the picking camera starts at
                     its position.
        sceneLens -- a lens identical to the one in the scene we're picking
                     items out of.  We'll use it to find out how far to
                     move the camera to track the mouse.
        """
        # debug flag: when True, show the picker's back buffer on a card.
        self.showBackBuffer = False

        # NOTE(review): despite the name, this stores the FULL film size
        # (x, y), not half of it.
        self.halfFilmSize = ( sceneLens.getFilmSize().getX(),
                              sceneLens.getFilmSize().getY(), )

        # you can't inspect a pixel being rendered unless you move it into
        # main RAM.  This image is the place where we'll do that.
        self.pickLayer = PNMImage()

        # loading a debug switch from the .prc file
        debugForcedColor = ConfigVariableBool( "debug-forced-color", False )
        self.debugForcedColor = debugForcedColor.getValue()

        # uniqueBuffer is so called because all objects will be drawn with a
        # unique color in here.  Note that it is (normally) 1x1 pixels.  We
        # don't need to render very much!  And this buffer is going to be
        # copied from the video card to main memory every frame, so we keep
        # it small.
        self.factor = 4   # downscale factor for the debug-sized buffer
        self.clickzPoz = Point2(0,0)   # last click position, in buffer pixels
        if not self.debugForcedColor:
            self.uniqueBuffer = base.win.makeTextureBuffer("uniqueBuffer", 1, 1, Texture(), True)
        else:
            # to debug the buffer, make it a bit bigger and show the scene
            self.uniqueBuffer = base.win.makeTextureBuffer("uniqueBuffer", int(self.halfFilmSize[0] / self.factor), int(self.halfFilmSize[1] / self.factor), Texture(), True)
        self.pickTex = self.uniqueBuffer.getTexture()

        # we'll use a plain black bg to show when nothing at all is being
        # selected.
        self.uniqueBuffer.setClearColor( Vec4( 0.0, 0.0, 0.0, 1.0 ) )

        # the frustum of an Orthographic lens doesn't expand, and shoots
        # straight ahead like a laser.
        self.uniqueCam = base.makeCamera(self.uniqueBuffer)
        lens = OrthographicLens()
        lens.setNear( sceneLens.getNear() )
        lens.setFar( sceneLens.getFar() )
        if not self.debugForcedColor:
            # normally we use a 1-pixel buffer.  For this, a very small
            # film size.
            lens.setFilmSize( 0.1, 0.1 )
        else:
            # to debug the buffer, we'd like to see a bit more of the scene.
            lens.setFilmSize( self.halfFilmSize[0], self.halfFilmSize[1] )
        self.uniqueCam.getNode(0).setLens( lens )

        self.uniqueCam.reparentTo( rootNP ) # rootNP will typically be render
        self.uniqueCam.setPos( theCam.getPos() )

        # activate our special forced-color shader for the camera
        self.activateShader()

        if self.showBackBuffer:
            # put the buffer's texture on a visible card for debugging.
            cm = CardMaker( "zomg" )
            #cm.setFrame( 320, 400, 240, 300 )
            cm.setFrame( -200, 200, 84, 384 )
            self.debugWindow = NodePath( cm.generate() )
            self.debugWindow.setTexture( self.pickTex )
            self.debugWindow.reparentTo( render )

        # this task would make the camera track the mouse continuously;
        # currently disabled in favor of updating on mouse clicks only.
        #taskMgr.add( self.uniqueCamMoveTask, "move unique cam" )

        # This dictionary will be where we store a mapping of colors to
        # nodepaths.  Rather than using all 3 of the available channels,
        # we'll give each object a different red value and use those values
        # for lookups into this dictionary.
        self.redValsToNPs = {}

        self.accept( "mouse1", self.updateCameraPos )

    def activateShader( self, doIt=True ):
        # Build a throwaway state holding the forced-color shader (or an
        # explicit shader-off) and make it the picking camera's initial
        # state, so everything that camera sees is rendered with it.
        tempnode = NodePath( PandaNode( "temp node" ) )
        if doIt:
            tempnode.setShader( Shader.load( "forcedColor.sha" ) )
        else:
            tempnode.setShaderOff()
        self.uniqueCam.node().setInitialState(tempnode.getState())

    def uniqueCamMoveTask( self, task ):
        """
        This task tracks the camera to the mouse as it moves around the main
        scene.  A couple of assumptions are made:
        1. The main scene is viewed through an orthographic lens, meaning we
           can simply change the X, Z values of this camera without adding
           any heading/pitch changes.
        2. All the contents we are to choose from are arranged at various
           depths on the Y axis, receding into the distance.  If you wanted
           to turn the main camera so that the screen plane was XY instead
           of XZ, for example, this task would fail.
        3. The world origin must be in the center of the screen; mpos values
           range from -1 to 1.  We simply multiply those values by the film
           size / 2 to figure out where the camera belongs.
        """
        self.updateCameraPos()
        return Task.cont

    def updateCameraPos( self, testPos=None ):
        # Record the current mouse position (or the supplied testPos)
        # converted from [-1, 1] screen space into buffer pixel coordinates.
        if base.mouseWatcherNode.hasMouse() or testPos is not None:
            if testPos is None:
                mpos = base.mouseWatcherNode.getMouse()
            else:
                mpos = testPos

            # X maps -1..1 -> 0..width; Y is flipped (screen-up vs image-down).
            self.clickzPoz = Point2( (1 + mpos.getX()) * self.uniqueBuffer.getXSize() * 0.5, (1 - mpos.getY()) * self.uniqueBuffer.getYSize() * 0.5 )

    def setPickable( self, nodePaths, isPickable=True ):
        # Accept a single NodePath or a list/tuple of them.
        if ( type( nodePaths ) == list ) or (type( nodePaths ) == tuple):
            for np in nodePaths:
                self.doSetPickable( np, isPickable )
        else:
            self.doSetPickable( nodePaths, isPickable )

        # now that we've made (possibly several) changes, reassign colors to
        # what's left.
        self.reassignColors()

    def doSetPickable( self, nodePath, isPickable=True, ):
        """
        When making a NodePath pickable, there's no need to do anything to
        flag the geometry.  This method will simply add/remove the nodepath
        from its internal dictionary of colors to shapes; the caller
        (setPickable) reassigns all the colors afterwards.
        """
        if isPickable:
            if nodePath not in self.redValsToNPs.values() and \
               nodePath is not None and \
               not nodePath.isEmpty():
                # we're going to add the new nodepath to redValsToNPs using a
                # temporary key.  When reassignColors() is called later, the
                # key will be overwritten with an actual red value.  Until
                # then, this nodePath will be unpickable.
                self.redValsToNPs["temp %f" % random.random()] = nodePath
                # give the node the default alpha threshold (0.5); see
                # setAlphaThreshold() to override it.
                nodePath.setShaderInput( "threshold", 0.5, 0, 0, 1 )
        elif nodePath is not None and \
             not nodePath.isEmpty():
            # removing: find the key currently mapped to this nodePath.
            currentKey = None
            for mapping in self.redValsToNPs.items():
                if mapping[1] == nodePath:
                    currentKey = mapping[0]
                    break
            if currentKey in self.redValsToNPs:
                del self.redValsToNPs[currentKey]
            nodePath.setShaderInput( "colorid", 0, 0, 0, 1 ) # reverts it to "nothing"

    def setAlphaThreshold( self, nodePath, alphaThreshold ):
        """
        This method lets you override the default opacity level (0.5) which
        will 'count' as an object.  This allows you to add, say, a glow
        around something without messing up detection.  Likewise, you can
        make artificially large clickable areas for images by surrounding
        them with very-nearly-transparent halos and then lowering the alpha
        threshold.  This is also useful for things with holes in them, like
        text.
        """
        nodePath.setShaderInput( "threshold", alphaThreshold, 0, 0, 0 )

    def reassignColors( self ):
        """
        Black represents no node, or an unselectable one.  All other colors
        used by the nodes will be shades of red.  They will be chosen such
        that the difference between shades is as large as possible, and so
        that one of them will be pure red (1.0).

        The reason for this is that the shader seems to introduce very
        slight inaccuracies between the color that is assigned as
        shaderInput and the one which is actually captured by the camera.
        See getCurrentlyPickedNP().
        """
        # first, delete the old mappings (this also discards the temporary
        # string keys created by doSetPickable).
        currentlyWatched = self.redValsToNPs.values()
        self.redValsToNPs.clear() # purge all the old mappings

        # now, make a new set of mappings from redVal to NodePath.
        numWatched = len( currentlyWatched )
        if numWatched > 0:
            desiredGap = 1.0 / numWatched # the difference between each red value.
            i = 1
            for thisNP in currentlyWatched:
                redval = i * desiredGap   # evenly spaced; last one is 1.0
                self.redValsToNPs[redval] = thisNP
                thisNP.setShaderInput( "colorid", redval, 0, 0, 1 )
                i += 1

    def getCurrentlyPickedNP( self, testPos=None ):
        """
        Return the object which is currently under the mouse (or under
        testPos), or None if nothing is there.  The color we get back from
        the camera turns out to be close--but not exactly identical to--the
        red value in the dictionary.  Because of this, an O(n) linear search
        is used to find the closest match.
        """
        self.updateCameraPos(testPos)
        # force a frame so the buffer reflects the new camera position.
        base.graphicsEngine.renderFrame()
        self.pickLayer.clear()
        self.pickTex.store( self.pickLayer ) # copy pickTex to an inspectable image
        if self.debugForcedColor:
            # debug buffer is full-size: sample at the click position.
            foundRed = self.pickLayer.getRed( int(self.clickzPoz.getX()), int(self.clickzPoz.getY()) )
        else:
            # normal 1x1 buffer: the only pixel is the one under the mouse.
            foundRed = self.pickLayer.getRed( 0, 0 )

        # pure black is the clear color, i.e. nothing pickable was hit.
        if 0.0 == foundRed or len( self.redValsToNPs ) < 1:
            return None

        # NOTE(review): keys() is indexed below, so this code assumes
        # Python 2, where dict.keys() returns a list.
        redVals = self.redValsToNPs.keys()
        bestRed = redVals[0]
        bestDiff = math.fabs( foundRed - bestRed )
        for i in range( 1, len( redVals ) ):
            thisRed = redVals[i]
            diff = math.fabs( foundRed - thisRed )
            if diff < bestDiff:
                bestRed = thisRed
                bestDiff = diff
        return self.redValsToNPs[bestRed]

    def getRandomPickable( self ):
        # Return a random currently-pickable NodePath.
        # NOTE(review): raises if nothing is pickable (empty dict).
        listz = self.redValsToNPs.values()
        return listz[ random.randrange( 0, len(listz) ) ]
  282.  
class UnitTest( DirectObject ):
    """
    This is a simple class to exercise the TextureBasedPicker and
    demonstrate its use.  It builds a small beach scene (sky, hills,
    clouds) out of transparent-textured cards and makes them pickable.
    """

    def __init__( self ):
        # some debugging values:
        #   abuseMode       -- fire a synthetic random click every frame
        #   moveCameraOnly  -- only move the picking camera; don't pick
        #   outputNodePath  -- print the name of whatever gets picked
        #   showDebugWindow -- show the picker's back buffer on screen
        self.abuseMode = True
        self.moveCameraOnly = False
        self.outputNodePath = False
        self.showDebugWindow = False

        # basic setup stuff
        base.disableMouse()
        self.accept( "f1", base.oobe )
        self.accept( "f2", render.place )
        self.accept( "mouse1", self.handleMouseDown )

        # configure the camera & picker
        lens = OrthographicLens()
        lens.setFilmSize(800, 600) # for convenience, pixels == panda units
        base.cam.node().setLens( lens )
        camera.setPos( 0.0, -30.0, 0.0 )
        self.picker = TextureBasedPicker(render, camera, lens)

        # make some elements: a full-screen sky card, far away and NOT
        # pickable, so clicks on it return None.
        cm = CardMaker("elements")
        cm.setFrame( -400, 400, -300, 300 )
        sky = NodePath( cm.generate() )
        sky.setTexture( loader.loadTexture( "textures/pokeearff/beach/sky.png" ) )
        sky.reparentTo( render )
        sky.setPos( 0, 4000, 0 )

        # make hills: 4 pickable transparent cards in a 2x2 layout,
        # each slightly closer than the last (Y decreases with i).
        cm.setFrame( -400, 400, -300, -14 )
        for i in range(4):
            hill = NodePath( cm.generate() )
            hill.setTexture( loader.loadTexture( "textures/pokeearff/beach/sand.png" ) )
            hill.setTransparency( True )
            hill.reparentTo( render )
            hill.setPos( -400 + 800 * (i % 2), 120 - 5 * i, 300 - 300 * (i / 2) )
            hill.setPythonTag( "name", "hill%i" % i )
            self.picker.setPickable( hill )

        # make clouds: 49 pickable transparent cards in a 7x7 grid,
        # also receding in Y.
        cm.setFrame( -23, 23, -28, 28 )
        for i in range(49):
            cloud = NodePath( cm.generate() )
            cloud.setTexture( loader.loadTexture( "textures/pokeearff/beach/cloud.png" ) )
            cloud.setTransparency( True )
            cloud.reparentTo( render )
            cloud.setPos( -360 + 120 * (i % 7), 100 - i, 240 - 80 * (i / 7) )
            cloud.setPythonTag( "name", "cloud%i" % i )
            self.picker.setPickable( cloud )

        # debug window to show you the current texture in the back buffer
        if self.showDebugWindow:
            cm.setFrame( -400, -360, 260, 300 )
            self.debugWindow = NodePath( cm.generate() )
            self.debugWindow.setTexture( self.picker.pickTex )
            self.debugWindow.reparentTo( render )

        # abuse mode: hammer the picker with one random click per frame.
        if self.abuseMode:
            taskMgr.add( self.abuseTask, "do_it" )

    def abuseTask( self, task ):
        # Simulate a click at a uniformly random screen position each frame.
        self.handleMouseDown( Point2( -1 + 2 * random.random(), -1 + 2 * random.random() ) )
        return Task.cont

    def handleMouseDown( self, tesPos=None ):
        # Handle a real mouse click (tesPos None) or a synthetic one.
        # NOTE(review): 'tesPos' is presumably a typo for 'testPos'; kept
        # as-is since it is a public keyword parameter.
        if self.moveCameraOnly:
            self.picker.updateCameraPos(tesPos)
        else:
            np = self.picker.getCurrentlyPickedNP( tesPos )
            if self.outputNodePath:
                if np is None:
                    print None
                else:
                    print np.getPythonTag( "name" )
  364.  
  365. # this little trick with __name__ allows you to type
  366. # "ppython TextureBasedPicker.py" and run the unit test, without ruining the
  367. # class's utility as a part of a bigger application.
  368. if __name__ == "__main__":
  369. import os
  370. if os.path.exists( "poke.prc" ):
  371. from pandac.PandaModules import loadPrcFile
  372. loadPrcFile("poke.prc")
  373. import direct.directbase.DirectStart
  374. p = UnitTest()
  375. run()
  376.  
  377.  
  378.  
  379.  
  380. ======================================================
  381.  
  382.  
  383.  
  384.  
  385.  
  386.  
  387.  
  388.  
  389.  
  390.  
  391.  
  392.  
  393.  
  394.  
  395.  
  396.  
  397.  
  398.  
  399. //Cg
  400. //
  401. //Cg profile arbvp1 arbfp1
  402.  
// Vertex shader: a minimal pass-through.  Transforms the vertex into clip
// space with the combined model-projection matrix and forwards the texture
// coordinate and vertex color to the fragment stage unchanged.
// (Parameter names follow Panda3D's shader-input naming convention:
// vtx_* are vertex columns, mat_* are matrices, l_* are varyings.)
void vshader(
    in float4 vtx_position : POSITION,
    in float3 vtx_texcoord0 : TEXCOORD0,
    in float4 vtx_color : COLOR,
    in uniform float4x4 mat_modelproj,
    out float4 l_position : POSITION,
    out float4 l_color0 : COLOR0,
    out float3 l_texcoord0 : TEXCOORD0 )
{
    // Project the vertex into clip space.
    l_position=mul(mat_modelproj, vtx_position);
    // Forward interpolants untouched.
    l_texcoord0 = vtx_texcoord0;
    l_color0=vtx_color;
}
  416.  
  417. void fshader(
  418. in uniform sampler2D tex_0 : TEXUNIT0,
  419. in uniform float4 k_colorid,
  420. in uniform float4 k_threshold,
  421. in float4 l_texcoord0 : TEXCOORD0,
  422. out float4 o_color : COLOR)
  423. {
  424. float4 surfacemap = tex2D(tex_0,l_texcoord0.xy);
  425. if (surfacemap.a > k_threshold.r) {
  426. o_color = k_colorid;
  427. } else {
  428. o_color = float4(0,0,0,0);
  429. }
  430. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement