Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- see http://www.panda3d.org/forums/viewtopic.php?p=76004#76004
- ======================================================
- # Author: Phil Light and Josh Yelon
- # Last Updated: 6/30/2007
- #
- # This tutorial shows how to determine what objects the mouse is pointing to,
- # while taking transparency into account, in a 2D environment.
- import math, time, random
- from direct.showbase.DirectObject import DirectObject
- #from direct.interval.IntervalGlobal import *
- from pandac.PandaModules import Point2
- from pandac.PandaModules import ConfigVariableBool
- from pandac.PandaModules import OrthographicLens
- from pandac.PandaModules import GraphicsOutput
- from pandac.PandaModules import CardMaker
- from pandac.PandaModules import PandaNode
- from pandac.PandaModules import NodePath
- from pandac.PandaModules import PNMImage
- from pandac.PandaModules import Texture
- from pandac.PandaModules import Shader
- from pandac.PandaModules import Vec4
- from direct.actor.Actor import Actor
- from direct.task.Task import Task
class TextureBasedPicker( DirectObject ):
    """
    This class is designed to allow you to find out what a user is really clicking
    on in a situation where transparent textures are being used. The problem with
    normal ray-based collision detection (see the Tutorial on Picking) is that it
    works on geometry, and disregards textures. So if the geometry happens to be
    cards with partially-transparent textures (sprites) the invisible corners of
    a sprite would throw off a normal collision pass.

        +-------------+
        |             |        When two cards overlay one another as shown, a
        |   +-------------+    click at the location marked with X would register
        |   |             |    as a click on the guy at front, even though there
        | (=|  X   __     |    is 'nothing' there. The intent of this class is
        |   |     (__)    |    to correctly pass the click through to the object
        |   |    ___|___  |    behind.
        |   |   |       | |
        |   |  /         \|
        +---| /           |
            |             |
            +-------------+

    This class implements a totally different method for determining what was
    clicked. The basic idea is to override all the colors in a texture, and force
    them to be a color which we specify, or transparent. Then, when the mouse is
    clicked we look at the pixel and return the NodePath which we've mapped to
    the chosen color. Naturally, this forced-color stuff takes place in an
    offscreen buffer.

    This class has been implemented for Orthographic lenses only. Other types of
    camera lens should probably be able to use the same technique, but (i think)
    the motion of the camera (to track the mouse) would be slightly more
    complicated. At any rate, there are a few places where I'm assuming that the
    main window uses an Orthographic lens.
    """

    def __init__( self, rootNP, theCam, sceneLens ):
        """
        init expects a lens identical to the one in the scene we're picking items
        out of. We'll use it to find out how far to move the camera to track the
        mouse.
        """
        self.showBackBuffer = False
        # NOTE(review): despite the name, this tuple holds the FULL film size
        # (getFilmSize() is not halved here) — confirm before renaming.
        self.halfFilmSize = ( sceneLens.getFilmSize().getX(),
                              sceneLens.getFilmSize().getY(), )
        # you can't inspect a pixel being rendered unless you move it into main RAM.
        # this allocates the place where we'll do that.
        self.pickLayer = PNMImage()
        # loading a debug switch from the .prc file
        debugForcedColor = ConfigVariableBool( "debug-forced-color", False )
        self.debugForcedColor = debugForcedColor.getValue()
        # uniqueBuffer is so called because all objects will be drawn with a unique
        # color in here. Note that it is (normally) 1x1 pixels. We don't need to
        # render very much! And this buffer is going to be copied from the video
        # card to main memory every frame, so we keep it small.
        self.factor = 4
        self.clickzPoz = Point2( 0, 0 )
        if not self.debugForcedColor:
            self.uniqueBuffer = base.win.makeTextureBuffer( "uniqueBuffer", 1, 1, Texture(), True )
        else:
            # to debug the buffer, make it a bit bigger and show the scene
            self.uniqueBuffer = base.win.makeTextureBuffer(
                "uniqueBuffer",
                int( self.halfFilmSize[0] / self.factor ),
                int( self.halfFilmSize[1] / self.factor ),
                Texture(), True )
        self.pickTex = self.uniqueBuffer.getTexture()
        # we'll use a plain black bg to show when nothing at all is being selected.
        self.uniqueBuffer.setClearColor( Vec4( 0.0, 0.0, 0.0, 1.0 ) )
        # the frustum of an Orthographic lens doesn't expand, and shoots straight
        # ahead like a laser.
        self.uniqueCam = base.makeCamera( self.uniqueBuffer )
        lens = OrthographicLens()
        lens.setNear( sceneLens.getNear() )
        lens.setFar( sceneLens.getFar() )
        if not self.debugForcedColor:
            # normally we use a 1-pixel buffer. For this, a very small film size.
            lens.setFilmSize( 0.1, 0.1 )
        else:
            # to debug the buffer, we'd like to see a bit more of the scene.
            lens.setFilmSize( self.halfFilmSize[0], self.halfFilmSize[1] )
        self.uniqueCam.getNode(0).setLens( lens )
        self.uniqueCam.reparentTo( rootNP )  # rootNP will typically be render
        self.uniqueCam.setPos( theCam.getPos() )
        # activate our special forced-color shader for the camera
        self.activateShader()
        if self.showBackBuffer:
            cm = CardMaker( "zomg" )
            #cm.setFrame( 320, 400, 240, 300 )
            cm.setFrame( -200, 200, 84, 384 )
            self.debugWindow = NodePath( cm.generate() )
            self.debugWindow.setTexture( self.pickTex )
            self.debugWindow.reparentTo( render )
        # this task would make the camera track the mouse continuously
        #taskMgr.add( self.uniqueCamMoveTask, "move unique cam" )
        # This dictionary will be where we store a mapping of colors to nodepaths.
        # rather than using all 3 of the available colors, we'll give each object a
        # different red value and use those values for lookups into this dictionary.
        self.redValsToNPs = {}
        self.accept( "mouse1", self.updateCameraPos )

    def activateShader( self, doIt=True ):
        """Enable (or disable) the forced-color shader on the offscreen camera."""
        tempnode = NodePath( PandaNode( "temp node" ) )
        if doIt:
            tempnode.setShader( Shader.load( "forcedColor.sha" ) )
        else:
            tempnode.setShaderOff()
        self.uniqueCam.node().setInitialState( tempnode.getState() )

    def uniqueCamMoveTask( self, task ):
        """
        This task tracks the camera to the mouse as it moves around the main scene.
        A couple of assumptions are made:
        1. The main scene is viewed through an orthographic lens, meaning we can
           simply change the X, Z values of this camera without adding any heading/
           pitch changes.
        2. All the contents we are to choose from are arranged at various depths on
           the Y axis, receding into the distance. If you wanted to turn the main
           camera so that the screen plane was XY instead of XZ, for example, this
           task would fail.
        3. The world origin must be in the center of the screen; mpos values range
           from -1 to 1. We simply multiply those values by the film size / 2 to
           figure out where the camera belongs.
        """
        self.updateCameraPos()
        return Task.cont

    def updateCameraPos( self, testPos=None ):
        """
        Record where in the pick buffer the mouse currently points.
        NOTE(review): despite the name, this only updates self.clickzPoz (pixel
        coordinates in the offscreen buffer); the camera itself is not moved here.
        """
        if base.mouseWatcherNode.hasMouse() or testPos is not None:
            if testPos is None:
                mpos = base.mouseWatcherNode.getMouse()
            else:
                mpos = testPos
            # map mouse coords [-1, 1] onto buffer pixel coords [0, size]
            self.clickzPoz = Point2( (1 + mpos.getX()) * self.uniqueBuffer.getXSize() * 0.5,
                                     (1 - mpos.getY()) * self.uniqueBuffer.getYSize() * 0.5 )

    def setPickable( self, nodePaths, isPickable=True ):
        """
        Register (or unregister) one NodePath, or a list/tuple of NodePaths,
        as pickable, then reassign the red values.
        """
        if isinstance( nodePaths, (list, tuple) ):
            for np in nodePaths:
                self.doSetPickable( np, isPickable )
        else:
            self.doSetPickable( nodePaths, isPickable )
        # now that we've made (possibly several) changes, reassign colors to what's
        # left.
        self.reassignColors()

    def doSetPickable( self, nodePath, isPickable=True ):
        """
        When making a Nodepath pickable, there's no need to do anything to flag the
        geometry. This method will simply add/remove the nodepath from its internal
        dictionary of colors to shapes; reassignColors() must be called afterwards
        (setPickable does this).
        """
        if isPickable:
            if nodePath not in self.redValsToNPs.values() and \
               nodePath is not None and \
               not nodePath.isEmpty():
                # we're going to add the new nodepath to redValsToNps using a temporary
                # key. when reassignColors() is called later, the key will be
                # overwritten with an actual red value. until then, this nodePath will
                # be unpickable.
                self.redValsToNPs["temp %f" % random.random()] = nodePath
                # default alpha threshold; the shader only reads the red component.
                nodePath.setShaderInput( "threshold", 0.5, 0, 0, 1 )
        elif nodePath is not None and \
             not nodePath.isEmpty():
            currentKey = None
            for redVal, np in self.redValsToNPs.items():
                if np == nodePath:
                    currentKey = redVal
                    break
            if currentKey in self.redValsToNPs:
                del self.redValsToNPs[currentKey]
            nodePath.setShaderInput( "colorid", 0, 0, 0, 1 )  # reverts it to "nothing"

    def setAlphaThreshold( self, nodePath, alphaThreshold ):
        """
        This method lets you override the default opacity level (0.5) which will
        'count' as an object. This allows you to add, say, a glow around something
        without messing up detection. Likewise, you can make artificially large
        clickable areas for images by surrounding them with very-nearly-transparent
        halos and then lowering the alpha threshold. This is also useful for things
        with holes in them, like text.
        """
        nodePath.setShaderInput( "threshold", alphaThreshold, 0, 0, 0 )

    def reassignColors( self ):
        """
        Black represents no node, or an unselectable one. All other colors used by
        the nodes will be shades of red. They will be chosen such that the
        difference between shades is as large as possible, and so that one of them
        will be pure red (1.0).
        The reason for this is that the shader seems to introduce very slight
        inaccuracies in the color that is assigned as shaderInput, and the one which
        is actually captured by the camera. See getCurrentlyPickedNP().
        """
        # FIX: snapshot the values before clearing -- on Python 3, .values() is a
        # live view that would be emptied by clear().
        currentlyWatched = list( self.redValsToNPs.values() )
        self.redValsToNPs.clear()  # purge all the old mappings
        # now, make a new set of mappings from redVal to NodePath.
        numWatched = len( currentlyWatched )
        if numWatched > 0:
            desiredGap = 1.0 / numWatched  # the difference between each red value.
            for i, thisNP in enumerate( currentlyWatched, start=1 ):
                redval = i * desiredGap
                self.redValsToNPs[redval] = thisNP
                thisNP.setShaderInput( "colorid", redval, 0, 0, 1 )

    def getCurrentlyPickedNP( self, testPos=None ):
        """
        Return the object which is currently under the mouse. The color we get back
        from the camera turns out to be close--but not exactly identical to--the
        red value in the dictionary. Because of this, an O(n) linear search is used
        to find the closest match.
        """
        self.updateCameraPos( testPos )
        base.graphicsEngine.renderFrame()
        self.pickLayer.clear()
        self.pickTex.store( self.pickLayer )  # copy pickTex to an inspectable image
        if self.debugForcedColor:
            foundRed = self.pickLayer.getRed( int( self.clickzPoz.getX() ),
                                              int( self.clickzPoz.getY() ) )
        else:
            # the normal buffer is 1x1, so the only pixel is (0, 0)
            foundRed = self.pickLayer.getRed( 0, 0 )
        if foundRed == 0.0 or not self.redValsToNPs:
            return None
        # FIX: the original indexed dict.keys(), which is not subscriptable on
        # Python 3. min() with a key performs the same closest-match search.
        bestRed = min( self.redValsToNPs,
                       key=lambda redVal: math.fabs( foundRed - redVal ) )
        return self.redValsToNPs[bestRed]

    def getRandomPickable( self ):
        """Return a random currently-pickable NodePath."""
        # FIX: .values() is not subscriptable on Python 3; random.choice on a
        # list works on both versions.
        return random.choice( list( self.redValsToNPs.values() ) )
class UnitTest( DirectObject ):
    """
    This is a simple class to exercise the TextureBasedPicker and demonstrate its
    use.
    """

    def __init__( self ):
        # some debugging values
        self.abuseMode = True
        self.moveCameraOnly = False
        self.outputNodePath = False
        self.showDebugWindow = False
        # basic setup stuff
        base.disableMouse()
        self.accept( "f1", base.oobe )
        self.accept( "f2", render.place )
        self.accept( "mouse1", self.handleMouseDown )
        # configure the camera & picker
        lens = OrthographicLens()
        lens.setFilmSize( 800, 600 )  # for convenience, pixels == panda units
        base.cam.node().setLens( lens )
        camera.setPos( 0.0, -30.0, 0.0 )
        self.picker = TextureBasedPicker( render, camera, lens )
        # make some elements
        cm = CardMaker( "elements" )
        cm.setFrame( -400, 400, -300, 300 )
        sky = NodePath( cm.generate() )
        sky.setTexture( loader.loadTexture( "textures/pokeearff/beach/sky.png" ) )
        sky.reparentTo( render )
        sky.setPos( 0, 4000, 0 )
        # make hills
        cm.setFrame( -400, 400, -300, -14 )
        for i in range(4):
            hill = NodePath( cm.generate() )
            hill.setTexture( loader.loadTexture( "textures/pokeearff/beach/sand.png" ) )
            hill.setTransparency( True )
            hill.reparentTo( render )
            # FIX: i // 2 makes the intended integer division explicit; plain
            # "/" would yield a float row offset on Python 3.
            hill.setPos( -400 + 800 * (i % 2), 120 - 5 * i, 300 - 300 * (i // 2) )
            hill.setPythonTag( "name", "hill%i" % i )
            self.picker.setPickable( hill )
        # make clouds: a 7x7 grid, each one a unit deeper on Y
        cm.setFrame( -23, 23, -28, 28 )
        for i in range(49):
            cloud = NodePath( cm.generate() )
            cloud.setTexture( loader.loadTexture( "textures/pokeearff/beach/cloud.png" ) )
            cloud.setTransparency( True )
            cloud.reparentTo( render )
            # FIX: same explicit integer division as above.
            cloud.setPos( -360 + 120 * (i % 7), 100 - i, 240 - 80 * (i // 7) )
            cloud.setPythonTag( "name", "cloud%i" % i )
            self.picker.setPickable( cloud )
        # debug window to show you the current texture in the back buffer
        if self.showDebugWindow:
            cm.setFrame( -400, -360, 260, 300 )
            self.debugWindow = NodePath( cm.generate() )
            self.debugWindow.setTexture( self.picker.pickTex )
            self.debugWindow.reparentTo( render )
        # abuse mode: hammer the picker with random clicks every frame
        if self.abuseMode:
            taskMgr.add( self.abuseTask, "do_it" )

    def abuseTask( self, task ):
        """Stress-test: simulate a click at a random screen position each frame."""
        self.handleMouseDown( Point2( -1 + 2 * random.random(), -1 + 2 * random.random() ) )
        return Task.cont

    def handleMouseDown( self, testPos=None ):
        """
        Perform a pick at testPos (or the real mouse position when None) and
        optionally print what was hit.
        FIX: parameter renamed from the typo 'tesPos'; all callers in this file
        pass it positionally or not at all.
        """
        if self.moveCameraOnly:
            self.picker.updateCameraPos( testPos )
        else:
            np = self.picker.getCurrentlyPickedNP( testPos )
            if self.outputNodePath:
                # parenthesized print works on both Python 2 and 3
                if np is None:
                    print( None )
                else:
                    print( np.getPythonTag( "name" ) )
# Running this module directly ("ppython TextureBasedPicker.py") launches the
# unit test; importing it from a larger application does not, thanks to the
# standard __name__ guard.
if __name__ == "__main__":
    import os
    # pick up local engine settings, if present, before the engine starts
    if os.path.exists( "poke.prc" ):
        from pandac.PandaModules import loadPrcFile
        loadPrcFile( "poke.prc" )
    import direct.directbase.DirectStart
    unitTest = UnitTest()
    run()
- ======================================================
//Cg
//
//Cg profile arbvp1 arbfp1

// Vertex shader: plain pass-through. Projects the vertex with the combined
// model-projection matrix and forwards the texture coordinates and vertex
// color unchanged to the fragment stage.
void vshader(
    in float4 vtx_position : POSITION,
    in float3 vtx_texcoord0 : TEXCOORD0,
    in float4 vtx_color : COLOR,
    in uniform float4x4 mat_modelproj,
    out float4 l_position : POSITION,
    out float4 l_color0 : COLOR0,
    out float3 l_texcoord0 : TEXCOORD0 )
{
    // transform into clip space
    l_position=mul(mat_modelproj, vtx_position);
    l_texcoord0 = vtx_texcoord0;
    l_color0=vtx_color;
}
// Fragment shader: the "forced color" pass. The sprite's real texture is
// sampled only for its alpha; where the pixel is opaque enough it is drawn
// in the node's unique k_colorid, otherwise fully transparent black.
// NOTE(review): l_texcoord0 is float4 here but float3 out of vshader --
// appears to rely on Cg padding the interpolant; confirm with the Cg docs.
void fshader(
    in uniform sampler2D tex_0 : TEXUNIT0,
    in uniform float4 k_colorid,
    in uniform float4 k_threshold,
    in float4 l_texcoord0 : TEXCOORD0,
    out float4 o_color : COLOR)
{
    float4 surfacemap = tex2D(tex_0,l_texcoord0.xy);
    // only the red component of k_threshold carries the alpha cutoff
    if (surfacemap.a > k_threshold.r) {
        o_color = k_colorid;
    } else {
        o_color = float4(0,0,0,0);
    }
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement