- Timestamp: 09/16/05 10:41:52
- Location: trunk/VUT/doc/SciReport
- Files: 4 added, 8 edited
trunk/VUT/doc/SciReport/Bib/jiri.bib
Changes from r243 to r277:
- Added a new entry, @Article{wimmer05:gpugems}: Michael Wimmer and Jiří Bittner, "Hardware Occlusion Queries Made Useful", GPU Gems 2: Programming Techniques for High-Performance Graphics and General-Purpose Computation, Addison Wesley, 2005, pp. 91--108.
- Corrected the key of the PhD thesis entry (Jiří Bittner, "Hierarchical Techniques for Visibility Computations") from bittner:03:phd to bittner:02:phd.
trunk/VUT/doc/SciReport/Bib/new.bib
Changes from r251 to r277 -- eight new entries were added; the existing Hillesland02 technical report entry is unchanged:
- avis96: David Avis and Komei Fukuda, "Reverse Search for Enumeration", Discrete Applied Mathematics, vol. 6, 1996, pp. 21--46.
- Fukuda:1996:DDM: K. Fukuda and A. Prodon, "Double Description Method Revisited", Lecture Notes in Computer Science, vol. 1120, 1996, pp. 91--111.
- lrs_site: David Avis, LRS polyhedra enumeration library, 2002, available at http://cgm.cs.mcgill.ca/~avis/C/lrs.html.
- buehler2001: Chris Buehler, Michael Bosse, Leonard McMillan, Steven J. Gortler, and Michael F. Cohen, "Unstructured Lumigraph Rendering", Computer Graphics (SIGGRAPH '01 Proceedings), 2001, pp. 425--432. The abstract describes an image-based rendering approach that generalizes light field rendering and view-dependent texture mapping and allows lumigraph-style rendering from input cameras in arbitrary configurations.
- haumont2005:egsr: Denis Haumont, Otso Mäkinen, and Shaun Nirenstein, "A Low Dimensional Framework for Exact Polygon-to-Polygon Occlusion Queries", Proceedings of Eurographics Symposium on Rendering, 2005, pp. 211--222.
- Sadagic: Amela Sadagic and Mel Slater, "Dynamic Polygon Visibility Ordering for Head-Slaved Viewing in Virtual Environments", Computer Graphics Forum 19(2), Eurographics Association, 2000, pp. 111--122, http://visinfo.zib.de/EVlib/Show?EVL-2000-336. The abstract describes the Viewpoint Movement Space (VpMS) algorithm, which partitions space into cells with an invariant back-to-front polygon visibility ordering that changes only slightly and predictably as the viewpoint crosses a cell boundary, avoiding the cost of resolving visibility from a BSP tree at every frame.
- dcg-handbook: Jacob E. Goodman and Joseph O'Rourke (editors), Handbook of Discrete and Computational Geometry, CRC Press, 1997.
- Pu98-DSGIV: Fan-Tao Pu, "Data Structures for Global Illumination and Visibility Queries in 3-Space", PhD thesis, University of Maryland, College Park, MD, 1998.
trunk/VUT/doc/SciReport/analysis.tex
Changes from r266 to r277:
- Added \label{chap:analysis} to the chapter heading.
- Reworded the opening paragraph: the polygonal scenes are now "the input for all developed algorithms" rather than "tools", and a sentence announcing that visibility in 3D is described using \plucker coordinates was appended.
- Updated cross references to match the report structure: the 3D \plucker coordinates are now "discussed below in this chapter" instead of in Chapter~\ref{chap:rot3d}; references to Chapter~\ref{chap:classes} and Chapter~\ref{chap:overview} now point to Chapter~\ref{chap:introduction}; and the blocker polygon of Chapters~\ref{chap:rot2d} and~\ref{chap:rot3d} is now referred to as "discussed above".
- Commented out the paragraph noting that event swaths need not be reconstructed for some from-region problems (e.g. PVS computation), since it referred to the removed Section~\ref{sec:rot3pvs}.
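Since several of these edits concern the \plucker-coordinate machinery, the relations the chapter relies on are summarized here for reference. This is a hedged sketch only; the report's own ordering and sign conventions for the six coordinates may differ:

    % Plucker coordinates of the oriented line through points P and Q:
    % a direction part d and a moment part m.
    \[
      \ell^* = (\mathbf{d} : \mathbf{m}), \qquad
      \mathbf{d} = Q - P, \qquad
      \mathbf{m} = P \times Q .
    \]
    % A 6-tuple represents a real line iff it lies on the Plucker quadric,
    \[
      \mathbf{d} \cdot \mathbf{m} = 0 ,
    \]
    % which leaves the four degrees of freedom mentioned in the chapter.
    % Two lines a and b intersect (or are parallel) iff the bilinear side
    % product vanishes,
    \[
      \mathrm{side}(a,b) = \mathbf{d}_a \cdot \mathbf{m}_b + \mathbf{d}_b \cdot \mathbf{m}_a = 0 ,
    \]
    % so for a fixed line a this is a linear constraint, i.e. a hyperplane in
    % the projective 5-space of lines -- the property the blocker-polyhedron
    % and occlusion-tree algorithms exploit.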
trunk/VUT/doc/SciReport/introduction.tex
Changes from r273 to r277:
- Replaced the labels \label{chap:overview} and \label{chap:classes} with a single \label{chap:introduction} and dropped the old introductory paragraph.
- Expanded the "Structure of the report" section: the report consists of two introductory chapters (this taxonomy chapter and the visibility analysis of Chapter~\ref{chap:analysis}, which introduces the \plucker coordinates used later for mutual visibility verification) and three chapters on the algorithms themselves -- online visibility culling of fully dynamic scenes on recent graphics hardware (Chapter~\ref{chap:online}), global visibility sampling together with view space partitioning for the PVS computation module (Chapter~\ref{chap:sampling}), and mutual visibility verification, which produces the final precomputed visibility solution (Chapter~\ref{chap:mutual}).
- Updated forward references: the singularity-free line mapping and the \plucker coordinate discussion now point to Chapter~\ref{chap:analysis} and Chapter~\ref{chap:mutual} instead of the removed Chapters~\ref{chap:vfr2d}, \ref{chap:vfr25d} and~\ref{chap:vfr3d}.
- Renamed the modules throughout the closing classification: "online culling tool" becomes "online visibility culling", "global visibility sampling tool" becomes "global visibility sampling", "mutual visibility tool" becomes "mutual visibility verification", and "all tools" becomes "all algorithms"; the listed properties (domain, scene restrictions, solution space and its data structures, use of coherence, output sensitivity, kD-tree acceleration, use of graphics hardware) are otherwise unchanged.
trunk/VUT/doc/SciReport/mutual.tex
Changes from r273 to r277:
- Renamed the chapter from "Mutual Visibility Tool" to "Mutual Visibility Verification" and added \label{chap:mutual}.
- Rewrote and corrected the introduction (fixing "aggresive", "suffieciently", "noticable", "identifiying"): the aggressive visibility sampling of the previous chapter suits fast solutions during development, whereas the final production solution must either be free of visible error or have a sufficiently small, unnoticeable error; verification starts by identifying a minimal set of mutually invisible object/view cell pairs and, for each object, the view cells forming the boundary of its invisible view cell set.
- Fixed "coservative" in the overview of the three verification algorithms and added the citation~\cite{bittner:02:phd} to the description of the 5D occlusion tree over blocker polyhedra; the figure illustrating the tree was relabelled from fig:duality2d to fig:ot.
- Demoted "Visibility test" from \section to \subsection, reflowed the paragraph, and added the remark that, since only mutual invisibility is being verified, the test can terminate as soon as the first visible fragment is found.
- Added Table~\ref{table:vis_states}, which defines how the visibility states of two fragments combine (I = invisible, P = partially visible, F = fully visible): equal states are preserved, I with F (in either order) gives P, and any combination involving P gives P.
- Fixed typos in the conservative verifier section ("verfier", "recursivelly", "conservativelly"); this verifier refines the strong occluder search of Cohen-Or et al.~\cite{cohen-egc-98} by hierarchically subdividing the shaft of lines between the tested regions and conservatively classifies them as visible once a predefined subdivision depth is reached.
- Rewrote the summary: the chapter now presents three mutual visibility verification algorithms -- the exact one based on hierarchical subdivision of the problem-relevant line set in \plucker coordinates, the conservative one parameterized by the shaft size at which regions are conservatively classified as visible, and the approximate error-bound verifier, which extends the conservative one with automatic termination criteria based on an estimate of the maximal error for the given shaft.
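The fragment-combination rule introduced by the new Table~\ref{table:vis_states} maps directly to code. The following C++ sketch is purely illustrative; the enum and function names are hypothetical and not taken from the report's implementation:

    #include <vector>

    // Hypothetical names; the report's implementation may differ.
    enum class Visibility { Invisible, PartiallyVisible, FullyVisible };

    // Combine the states of two fragments as in Table vis_states:
    // equal states are kept, mixed I/F gives P, anything involving P stays P.
    Visibility combine(Visibility a, Visibility b) {
        return (a == b) ? a : Visibility::PartiallyVisible;
    }

    // Visibility of a whole region with respect to the source polygon is the
    // fold of its boundary-face fragment states.  For invisibility
    // verification the loop may stop as soon as any fragment is not invisible.
    Visibility regionVisibility(const std::vector<Visibility>& fragments) {
        if (fragments.empty()) return Visibility::Invisible;
        Visibility result = fragments.front();
        for (Visibility f : fragments)
            result = combine(result, f);
        return result;
    }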
trunk/VUT/doc/SciReport/online.tex
Changes from r273 to r277:
- Renamed the chapter from "Online Culling Tool" to "Online Visibility Culling" and added \label{chap:online}.
- Added the citation~\cite{wimmer05:gpugems} where the introduction notes that the method integrates easily into existing real-time rendering packages on architectures supporting the underlying occlusion query, and fixed the run-together "cullingonly" in the discussion of the view frustum culling versus online culling comparison figure.
- Replaced paper-specific wording in the related-work discussion: "our paper" becomes "our method" and "the online culling tool" becomes "the online occlusion culling"; the surrounding text on exploiting temporal coherence (Greene et al.~\cite{Greene93a}) and on avoiding repeated visibility tests of interior hierarchy nodes while overcoming the latency of hardware occlusion queries was reflowed.
- Commented out the "rest of the paper is organized as follows" roadmap paragraph, rewording "paper" to "chapter" inside the comment.
- Rearranged the test-scene figure (fig:scenes): the teapots and city images are now placed side by side with the power plant image below, instead of three stacked images; the caption is unchanged.
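As background for the hardware occlusion queries this chapter builds on, a minimal usage sketch follows. It assumes an OpenGL 1.5 (or ARB_occlusion_query) context with the query entry points resolved, e.g. through an extension loader, and hypothetical drawBoundingBox()/drawObject() helpers; it is not the chapter's hierarchical culling algorithm itself, which additionally interleaves queries with rendering to hide the query latency:

    #include <GL/gl.h>

    // Hypothetical helpers assumed to exist in the host renderer.
    void drawBoundingBox();
    void drawObject();

    // Issue a single occlusion query for an object's bounding box and draw the
    // object only if at least one bounding-box sample passes the depth test.
    void renderIfVisible() {
        GLuint query = 0;
        glGenQueries(1, &query);

        // Rasterize the bounding box without touching color or depth buffers.
        glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
        glDepthMask(GL_FALSE);
        glBeginQuery(GL_SAMPLES_PASSED, query);
        drawBoundingBox();
        glEndQuery(GL_SAMPLES_PASSED);
        glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        glDepthMask(GL_TRUE);

        // Reading the result right away stalls the pipeline; the chapter's
        // algorithm instead keeps a queue of pending queries to hide latency.
        GLuint visibleSamples = 0;
        glGetQueryObjectuiv(query, GL_QUERY_RESULT, &visibleSamples);
        if (visibleSamples > 0)
            drawObject();

        glDeleteQueries(1, &query);
    }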
trunk/VUT/doc/SciReport/report.tex
Changes from r273 to r277:
- Added \usepackage{pdfpages} and \usepackage{float} to the preamble and \makeindex before \begin{document}.
- The document now includes the first two cover pages via \includepdf[pages=1-2]{cover.pdf}, numbers the table of contents in roman numerals before switching to arabic page numbers, and comments out \maketitle.
trunk/VUT/doc/SciReport/sampling.tex
Changes from r273 to r277:
- Renamed the chapter from "Global Visibility Sampling Tool" to "Global Visibility Sampling" and added \label{chap:sampling}.
- Corrected spelling throughout the description of the two-step framework ("aggresive", "imporant", "framewoirk", "stategy", "ommision", "distructing", "Visibible") and replaced "mutual visibility tool" with "mutual visibility verification": the first step is aggressive visibility sampling, a fast progressive estimate of global visibility that fits the game development cycle and terminates when the average contribution of new ray samples falls below a threshold; the second step verifies this solution with an exact, conservative, or error-bound algorithm chosen per scene (e.g. error-bound for a forest, exact or conservative for urban scenes where omitting even small objects is distracting).
- Fixed the citation keys for the exact mutual visibility method to~\cite{bittner:02:phd,nirenstein:02:egwr,haumont2005:egsr}.
- Moved the Vienna view cells figure (fig:vienna_viewcells) after the paragraph stating the requirements on the view cell representation and corrected its caption to "Vienna view cells".
- Reflowed and corrected the description of the BSP/kD-tree view cell representation and of the three construction approaches (inserting predefined view cell meshes, subdividing along scene geometry, and sampling-guided subdivision), fixing "leafes", "neccessary", "prefered", "criterium", the broken \hline in the statistics table, and capitalizing "Vienna"; split criterion 2 now asks that the ray sets of the new view cells be disjoint, i.e. that the number of rays contributing to more than one cell be minimized.
- Commented out the future-work paragraph on feedback-driven view cell subdivision and merging.
- In the from-object sampling sections, fixed "consting", "deterministicaly", "vie cell", "halfpace", "disapearance", "tripple", added the reference "(see Figure~\ref{fig:sampling})" to the basic randomized sampling description, and renamed the ray-sampling figure label from fig:online_culling_example to fig:sampling with a centered, narrower layout.
- Rewrote the summary: the chapter describes the global visibility sampling algorithm at the core of the preprocessing framework; it computes aggressive visibility (a subset of the exact PVS per view cell) as a fast progressive solution, to be verified by the mutual visibility algorithms of the next chapter.
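The basic from-object sampling strategy described in this chapter -- pick a random point on an object's surface and cast a ray whose direction is distributed uniformly over the hemisphere around the surface normal -- can be sketched as follows. The vector type, random-number handling, and function names are illustrative assumptions rather than the framework's actual interface:

    #include <cmath>
    #include <random>

    // Minimal 3D vector; the framework's own math types would be used instead.
    struct Vec3 { double x, y, z; };

    static Vec3 cross(const Vec3& a, const Vec3& b) {
        return { a.y * b.z - a.z * b.y,
                 a.z * b.x - a.x * b.z,
                 a.x * b.y - a.y * b.x };
    }

    // Sample a direction distributed uniformly over the hemisphere around the
    // unit surface normal n (uniform in solid angle, not cosine-weighted).
    Vec3 uniformHemisphereDirection(const Vec3& n, std::mt19937& rng) {
        const double pi = 3.14159265358979323846;
        std::uniform_real_distribution<double> u01(0.0, 1.0);

        double z   = u01(rng);                 // cos(theta), uniform in [0,1]
        double r   = std::sqrt(1.0 - z * z);
        double phi = 2.0 * pi * u01(rng);

        // Build an orthonormal basis (t, b, n) around the normal.
        Vec3 a = (std::fabs(n.x) > 0.9) ? Vec3{0.0, 1.0, 0.0} : Vec3{1.0, 0.0, 0.0};
        Vec3 t = cross(a, n);
        double len = std::sqrt(t.x * t.x + t.y * t.y + t.z * t.z);
        t = { t.x / len, t.y / len, t.z / len };
        Vec3 b = cross(n, t);

        double sx = r * std::cos(phi), sy = r * std::sin(phi);
        return { sx * t.x + sy * b.x + z * n.x,
                 sx * t.y + sy * b.y + z * n.y,
                 sx * t.z + sy * b.z + z * n.z };
    }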