diff --git a/Documentation/biblio/geom.bib b/Documentation/biblio/geom.bib index 49ae3a088f5..9e7cdb9a427 100644 --- a/Documentation/biblio/geom.bib +++ b/Documentation/biblio/geom.bib @@ -4167,7 +4167,7 @@ cell neighborhood in $O(m)$ time." , succeeds = "aarx-clgta-96" , update = "98.07 rote, 98.03 mitchell, 97.03 rote" , abstract = "Exploiting the concept of so-called light edges, we introduce - a new way of defining the greedy triangulation GT(S) of a point + a new way of defining the greedy triangulation GT(S) of a point set S. It provides a decomposition of GT(S) into levels, and the number of levels allows us to bound the total edge length of GT(S). In particular, we show @@ -12292,9 +12292,9 @@ method that uses very different techniques." , pages = "201--290" , url = "http://wwwpi6.fernuni-hagen.de/Publikationen/tr198.pdf" , succeeds = "ak-vd-96" -, cites = "ahknu-vdcfc-95, abms-claho-94, aesw-emstb-91, agss-ltacv-89, ahl-sqrpc-90, aiks-fkpmd-91, a-dppuv-82, aaag-ntsp-95, aa-skfgpf-95, aacktrx-tin-96, a-nemts-83, as-vdco-95, ay-aampt1-90, ay-aampt2-90, ar-cvddp-96, a-gvdps-89, a-lbvdc-98, abky-cabmm-88, abcw-cpdts-88, ab-rdt-85, a-sdcgp-85, a-pdpaa-87, a-rpccc-87, a-iadbu-88, a-lcpd-88, a-ndrcv-90, a-vdsfg-91, ae-oacwv-84, aha-mttls-92, ai-grvd-88, as-solri-92, abi-cvus-88, br-rasas-90, bo-arcgi-79, bs-dcms-76, bwy-oetac-80, be-mgot-92i, beg-pgmg-94, b-bnvd, bt-gatuf-85, bmt-dppbp-96, bcdt-osc3d-91i, bdsty-arsol-92, bg-tdrcs-93, bsty-vdhdc-95, bt-rcdt-93, bh-cpcoe-88, b-rgsdd-89, b-vdch-79, b-gtfga-80, bms-hcvdl-94, b-gog-55, c-vmpmp-85, cd-svd-88, crw-gc-91, csy-oscpf-95, cdns-nsrgs-95, c-ochaa-93, ce-iacko-87, cgt-ecabs-95, cx-alesw-96, c-bvdcp-86, c-cdt-89, c-tapga-89, c-gqmgc-93, - cd-vdbcd-85, ckstw-vdl3s-95, csw-fmasp-95, c-wcanh-76, cms-mfdca-94, c-narsc-87, c-agmst-89, cms-frric-93, cs-arscg-89, cmrs-tspvd-93, cw-pspp-86, dj-wtacg-89, ds-oiti-89, bms-plpco-93, dfnp-stdt-91, d-eaclm-77, dk-savd-87, dk-bspwa-97, dn-cgacp-85, d-slsv-34, d-pp-44, d-rysoa-92, dv-cprac-77, dds-saeid-92, de-apphd-96, d-nhndt-87, d-tdt-90, d-fhcdt-92, d-udrdp-50, dl-cvdrp-91, ddg-fsp-83, dfs-dgaag-90, dl-pmtds-89, dl-msp-76, d-ec-83, d-fdcac-87, d-hdvdl-91, e-acg-87, e-atccd-90, e-ubids-95, egs-ueplf-89, egs-oplms-86, eks-sspp-83, em-tdas-94, eos-calha-86, es-vda-86, ess-ztha-93, es-itfwr-96, es-otatd-91, et-qtaml-93, et-ubcdt-93, etw-otama-92, ei-drmwc-79, f-sodt-90, f-iwatd-86, f-nsa2d-92, f-vddt-92, f-savd-87, gs-nsagv-69, gj-cigtn-79, g-3dmud-95, g-agt-85, grr-vdlsm-95, grss-sracp-95, - goy-cvdsl-93, gs-cdtp-78, gks-ricdv-92, gmr-vdmpp-92, gs-pmgsc-85, h-ca-75, h-rvdlp-92, h-gbitp-91, hkp-itnc-91, hn-sc-89, hns-psscp-88, h-pcprn-91, hks-uevsi-93, h-oarms-79, iklm-cdf3s-93, iki-awvdr-94, iss-nriac-92, j-3dtlt-89, j-ctddt-91, j-gspgm-91, jrz-ccdt-91, km-accvd-91, km-icdmc-92, kk-cdtmb-88, kg-cgwac-92, k-csmwt-94, ks-tpscv-93, k-eccs-79, k-ndot-80, k-osps-83, kr-fcm-85, k-cddvd-80, k-cavd-89, kl-ltrab-93, kl-fsc-95, kl-mpsp-95, kmm-ricav-93a, kw-vdbgm-88, k-sssgt-56, kl-fsacl-95, l-dtmmi-94, l-esta-94, l-scsi-77, l-vdlmh-94, l-ricsa-95, l-tdvdl-80, l-matps-82, l-knnvd-82, ld-gvdp-81, ll-gdtpg-86, lw-vdllm-80, lk-qgtam-96, l-ltcrn-94, msw-sblp-92, ms-pggrg-80, m-dtchn-84, mr-vdcdg-90, mks-cplvd-96, m-tdird-76, m-mnfcp-70, m-lpltw-84, mmo-cavd-91, m-zkav-93, +, cites = "ahknu-vdcfc-95, abms-claho-94, aesw-emstb-91, agss-ltacv-89, ahl-sqrpc-90, aiks-fkpmd-91, a-dppuv-82, aaag-ntsp-95, aa-skfgpf-95, aacktrx-tin-96, a-nemts-83, as-vdco-95, ay-aampt1-90, ay-aampt2-90, ar-cvddp-96, a-gvdps-89, a-lbvdc-98, 
abky-cabmm-88, abcw-cpdts-88, ab-rdt-85, a-sdcgp-85, a-pdpaa-87, a-rpccc-87, a-iadbu-88, a-lcpd-88, a-ndrcv-90, a-vdsfg-91, ae-oacwv-84, aha-mttls-92, ai-grvd-88, as-solri-92, abi-cvus-88, br-rasas-90, bo-arcgi-79, bs-dcms-76, bwy-oetac-80, be-mgot-92i, beg-pgmg-94, b-bnvd, bt-gatuf-85, bmt-dppbp-96, bcdt-osc3d-91i, bdsty-arsol-92, bg-tdrcs-93, bsty-vdhdc-95, bt-rcdt-93, bh-cpcoe-88, b-rgsdd-89, b-vdch-79, b-gtfga-80, bms-hcvdl-94, b-gog-55, c-vmpmp-85, cd-svd-88, crw-gc-91, csy-oscpf-95, cdns-nsrgs-95, c-ochaa-93, ce-iacko-87, cgt-ecabs-95, cx-alesw-96, c-bvdcp-86, c-cdt-89, c-tapga-89, c-gqmgc-93, + cd-vdbcd-85, ckstw-vdl3s-95, csw-fmasp-95, c-wcanh-76, cms-mfdca-94, c-narsc-87, c-agmst-89, cms-frric-93, cs-arscg-89, cmrs-tspvd-93, cw-pspp-86, dj-wtacg-89, ds-oiti-89, bms-plpco-93, dfnp-stdt-91, d-eaclm-77, dk-savd-87, dk-bspwa-97, dn-cgacp-85, d-slsv-34, d-pp-44, d-rysoa-92, dv-cprac-77, dds-saeid-92, de-apphd-96, d-nhndt-87, d-tdt-90, d-fhcdt-92, d-udrdp-50, dl-cvdrp-91, ddg-fsp-83, dfs-dgaag-90, dl-pmtds-89, dl-msp-76, d-ec-83, d-fdcac-87, d-hdvdl-91, e-acg-87, e-atccd-90, e-ubids-95, egs-ueplf-89, egs-oplms-86, eks-sspp-83, em-tdas-94, eos-calha-86, es-vda-86, ess-ztha-93, es-itfwr-96, es-otatd-91, et-qtaml-93, et-ubcdt-93, etw-otama-92, ei-drmwc-79, f-sodt-90, f-iwatd-86, f-nsa2d-92, f-vddt-92, f-savd-87, gs-nsagv-69, gj-cigtn-79, g-3dmud-95, g-agt-85, grr-vdlsm-95, grss-sracp-95, + goy-cvdsl-93, gs-cdtp-78, gks-ricdv-92, gmr-vdmpp-92, gs-pmgsc-85, h-ca-75, h-rvdlp-92, h-gbitp-91, hkp-itnc-91, hn-sc-89, hns-psscp-88, h-pcprn-91, hks-uevsi-93, h-oarms-79, iklm-cdf3s-93, iki-awvdr-94, iss-nriac-92, j-3dtlt-89, j-ctddt-91, j-gspgm-91, jrz-ccdt-91, km-accvd-91, km-icdmc-92, kk-cdtmb-88, kg-cgwac-92, k-csmwt-94, ks-tpscv-93, k-eccs-79, k-ndot-80, k-osps-83, kr-fcm-85, k-cddvd-80, k-cavd-89, kl-ltrab-93, kl-fsc-95, kl-mpsp-95, kmm-ricav-93a, kw-vdbgm-88, k-sssgt-56, kl-fsacl-95, l-dtmmi-94, l-esta-94, l-scsi-77, l-vdlmh-94, l-ricsa-95, l-tdvdl-80, l-matps-82, l-knnvd-82, ld-gvdp-81, ll-gdtpg-86, lw-vdllm-80, lk-qgtam-96, l-ltcrn-94, msw-sblp-92, ms-pggrg-80, m-dtchn-84, mr-vdcdg-90, mks-cplvd-96, m-tdird-76, m-mnfcp-70, m-lpltw-84, mmo-cavd-91, m-zkav-93, m-uuam-28, m-rcvdp-93, m-spop-93, mmp-dgp-87, ms-getcc-88, mp-fitcp-78, m-osclv-90, m-lavd-91, osy-gvdl-86, osy-gvdl-87, oy-rmpmd-85, oim-iimvd-84, obs-stcav-92, p-etspi-77, pl-ecgvd-95, p-kpudz-82, p-mrpdt-92, ps-cgi-85, p-scnsg-57, r-odtr-91, rr-oprav-94, r-aiv-94, r-mrpdt-90, r-tbvdm-93, rsl-ashts-77, st-pwvt-88, s-cvdhd-82, s-mplbc-85, s-chdch-86, s-nfhdv-87, s-cdtvd-88, s-sdlpc-91, s-barga-93, s-cg-78, sh-cpp-75, s-icpps-85, s-atubl-94, s-let-78, s-vidt-80, s-sagdt-91, s-facdt-87, s-mmdpsl-91a, sd-csdta-95, s-smane-92, si-cvdom-92, soi-toari-90, s-rngam-83, si-atpvd-86, t-otdt-93, too-natdv-83, t-gcvdm-86, t-rngfp-80, v-mstkd-88, v-sgagc-91, v-dmrsl-09, v-nadpc-08, w-eucdt-93, ws-oacdt-87, w-cnddt-81, w-sedbe-91a, www-sdpfo-87, y-cmstk-82, y-amp-87, y-oavds-87, zm-sdnah-91, ZZZ" , update = "00.11 smid, 00.03 bibrelex, 99.03 bibrelex, 98.11 bibrelex, 98.07 mitchell, 98.03 bibrelex, 97.03 icking" , annote = "Chapter 5 of su-hcg-00" @@ -12847,9 +12847,9 @@ method that uses very different techniques." , url = "http://www.ifor.math.ethz.ch/staff/fukuda/fukuda.html" , update = "98.03 houle, 97.03 pocchiola, 96.05 fukuda" , annote = "Reverse search is a general exhaustive search technique -which came out of the new convex hull algorithm by the authors. +which came out of the new convex hull algorithm by the authors. 
This technique can be applied to many enumeration problems in computer -science, operations research and geometry. +science, operations research and geometry. It is highly suitable for parallelization." } @@ -16291,7 +16291,7 @@ rendering. Contains pseudocode." , number = 4 , year = 1997 , note = "Special issue on parallel I/O. An earlier version - appears in Proc. of the 8th Annual + appears in Proc. of the 8th Annual ACM Symposium on Parallel Algorithms and Architectures (SPAA~'96), Padua, Italy, June 1996, 109--118" , update = "97.03 murali" @@ -18449,12 +18449,12 @@ the interior. Contains pseudocode." , succeeds = "d-rld-89" , update = "98.07 bibrelex, 98.03 mitchell, 93.09 held" , annote = "He considers rectilinear paths in a rectilinear simple polygon. In $O(n\log n)$ - preprocessing time and space, he builds a data structure that supports - $O(\log n)$ time queries for distance between two points ($O(1)$ time between - two polygon vertices). He is actually searching for paths that are``smallest'' in - that they are shortest simultaneously in rectilinear link distance and - $L_1$ length (which is always possible). See improvements to $O(n)$ time and - space by Lingas, Maheshwari, and Sack~\cite{lms-parld-95} and + preprocessing time and space, he builds a data structure that supports + $O(\log n)$ time queries for distance between two points ($O(1)$ time between + two polygon vertices). He is actually searching for paths that are``smallest'' in + that they are shortest simultaneously in rectilinear link distance and + $L_1$ length (which is always possible). See improvements to $O(n)$ time and + space by Lingas, Maheshwari, and Sack~\cite{lms-parld-95} and Schuierer~\cite{s-odssr-96}." } @@ -20044,6 +20044,16 @@ $O(n^2)$ in the plane." , keywords = "Delaunay triangulation, probabilistic analysis" } +@article{bfhhm-epsph-15 +, author = "A. Baram and E. Fogel and D. Halperin and M. Hemmer and S. Morr" +, title = "Exact {M}inkowski sums of Polygons With Holes" +, journal = "Internat. J. Comput. Geom. Appl." +, volume = 1 +, year = 2015 +, pages = "01--01" +, note = "To appear" +} + @techreport{bg-dpd-91 , author = "M. Bern and J. Gilbert" , title = "Drawing the planar dual" @@ -21011,10 +21021,10 @@ cubes with side-lengths not exceeding 1 in the $3$-dimensional euclidean space. Let $S$ and $T$ be two points lying outside the open cubes. Assume one needs to find a short path emanating from $S$ and terminating at $T$ avoiding the cubes of $\cal P$ -under the restriction that the cubes are not known prior to the search. +under the restriction that the cubes are not known prior to the search. In fact the positions and the side-lengths of the cubes become known to the searcher as the cubes are contacted. We give an algorithm to -construct a path of length less than +construct a path of length less than $\frac 32 d + 3 \sqrt 3 \log d + 5$, where $d > 3 \sqrt 3$ denotes the distance between S and T." } @@ -24325,7 +24335,7 @@ experimental results are given." , abstract = "This paper presents the main algorithmic and design choices that have been made -to implement triangulations in +to implement triangulations in the computational geometry algorithms library CGAL." } @@ -25614,9 +25624,9 @@ present a polynomial-time exact algorithm to solve this problem." rectangles floating in 3-space, with edges represented by vertical lines of sight. 
We apply an extension of the {Erd\H os}-Szekeres Theorem in a geometric setting to obtain an - upper bound of 56 for size of the largest complete graph which - is representable. On the other hand, we construct a - representation of the complete graph with 22 vertices. + upper bound of 56 for size of the largest complete graph which + is representable. On the other hand, we construct a + representation of the complete graph with 22 vertices. These are the best existing bounds." } @@ -28630,7 +28640,7 @@ determinants." , year = 1993 , update = "98.03 mitchell" , abstract = "We calculate the partition - of the configuration space $I\!\!R^2 x S^1$ + of the configuration space $I\!\!R^2 x S^1$ of a car-like robot, only moving forwards, with respect to the type of the length optimal paths. This kind of robot is subject to kinematic constraints on its path curvature and its orientation. @@ -33579,35 +33589,35 @@ determinants." Given a set of polygonal obstacles of $n$ vertices in the plane, the problem of processing the all-pairs Euclidean {\em short} path queries is that of reporting an obstacle-avoiding path $P$ (or -its length) between two arbitrary query points $p$ and $q$ in the -plane, such that the length of $P$ is within a small factor of the +its length) between two arbitrary query points $p$ and $q$ in the +plane, such that the length of $P$ is within a small factor of the length of a Euclidean {\em shortest} obstacle-avoiding path between $p$ and $q$. The goal is to answer each short path query quickly -by constructing data structures that capture path information in -the obstacle-scattered plane. For the related all-pairs Euclidean -{\em shortest} path problem, the best known algorithms for even -very simple cases (e.g., {\em rectilinear} shortest paths among +by constructing data structures that capture path information in +the obstacle-scattered plane. For the related all-pairs Euclidean +{\em shortest} path problem, the best known algorithms for even +very simple cases (e.g., {\em rectilinear} shortest paths among disjoint {\em rectangular} obstacles in the plane) require at least quadratic space and time to construct a data structure, -so that a length query can be answered in polylogarithmic time. -The previously best known solution to the all-pairs Euclidean -{\em short} path problem also uses a data structure of quadratic -space and superquadratic construction time, in order to answer a -length query in polylogarithmic time. In this paper, we present a -data structure that requires nearly linear space and takes subquadratic -time to construct. Precisely, for any given $\epsilon$ satisfying +so that a length query can be answered in polylogarithmic time. +The previously best known solution to the all-pairs Euclidean +{\em short} path problem also uses a data structure of quadratic +space and superquadratic construction time, in order to answer a +length query in polylogarithmic time. In this paper, we present a +data structure that requires nearly linear space and takes subquadratic +time to construct. Precisely, for any given $\epsilon$ satisfying $0$ $<$ $\epsilon$ $\leq$ $1$, our data structure can be built -in $o(q^{3/2})$ $+$ $O((n\log n)/\epsilon)$ time and -$O(n\log n+n/\epsilon)$ space, where $q$, $1$ $\leq$ $q$ $\leq$ $n$, -is the minimum number of faces needed to cover all the vertices of -a certain planar graph we use. 
This data structure enables us to +in $o(q^{3/2})$ $+$ $O((n\log n)/\epsilon)$ time and +$O(n\log n+n/\epsilon)$ space, where $q$, $1$ $\leq$ $q$ $\leq$ $n$, +is the minimum number of faces needed to cover all the vertices of +a certain planar graph we use. This data structure enables us to report the length of a short path between two arbitrary query points in $O((\log n)/\epsilon+1/\epsilon^2)$ time and the actual path -in $O((\log n)/\epsilon+1/\epsilon^2+L)$ time, where $L$ is the -number of edges of the output path. The constant approximation -factor, $6+\epsilon$, for the short paths that we compute is quite -small. Our techniques are parallelizable and can also be used -to improve the previously best known results on several related +in $O((\log n)/\epsilon+1/\epsilon^2+L)$ time, where $L$ is the +number of edges of the output path. The constant approximation +factor, $6+\epsilon$, for the short paths that we compute is quite +small. Our techniques are parallelizable and can also be used +to improve the previously best known results on several related graphic and geometric problems." } @@ -36877,7 +36887,7 @@ avoids overlap. This is useful in cartography." This paper shows that the $i$-level of an arrangement of hyperplanes in $E^d$ has at most ${{i+d-1}\choose {d-1}}$ local minima. This bound follows from ideas previously used to prove bounds on $(\leq k)$-sets. -Using linear programming duality, +Using linear programming duality, the Upper Bound Theorem is obtained as a corollary, giving yet another proof of this celebrated bound on the number of vertices of a simple polytope @@ -42595,10 +42605,10 @@ Contains C code." , succeeds = "dp-olacd-91" , update = "98.11 bibrelex, 98.07 bibrelex, 95.09 mitchell" , annote = "In this paper you will find the definition of a Constrained - Delaunay Triangulation, some theorems and the pseudocode of - the algorithms to program it. On-Line means that you can - insert points and required edges in any order. With this - algorithm you can update an old CDT without retriangulating + Delaunay Triangulation, some theorems and the pseudocode of + the algorithms to program it. On-Line means that you can + insert points and required edges in any order. With this + algorithm you can update an old CDT without retriangulating the old data." } @@ -44319,7 +44329,7 @@ Contains C code." @techreport{d-vrtdd-09 , author = "Olivier Devillers" -, title = "Vertex Removal in Two Dimensional {Delaunay} Triangulation: +, title = "Vertex Removal in Two Dimensional {Delaunay} Triangulation: Asymptotic Complexity is Pointless" , thanks = "triangles" , institution = "INRIA" @@ -53238,17 +53248,17 @@ library." , update = "98.11 bibrelex, 98.03 mitchell, 97.11 bibrelex, 97.03 rote" , abstract = "We call a line $l$ a separator for a set $S$ of objects in the plane if $l$ avoids all the objects and - partitions $S$ into two nonempty subsets, one consisting + partitions $S$ into two nonempty subsets, one consisting of objects lying above $l$ and the - other of objects lying below $l$. We present an + other of objects lying below $l$. We present an $O(n log n)$-time algorithm for - finding a separator line for a set of $n$ segments, provided - the ratio between the diameter of the set of segments and + finding a separator line for a set of $n$ segments, provided + the ratio between the diameter of the set of segments and the length of the smallest segment is bounded. - No subquadratic algorithms are known for the general case. 
+ No subquadratic algorithms are known for the general case. Our algorithm is based on the recent results of - Matousek, Pach, Sharir, Sifrony, and Welzl (1994) concerning - the union of fat triangles, but we also include an analysis + Matousek, Pach, Sharir, Sifrony, and Welzl (1994) concerning + the union of fat triangles, but we also include an analysis which improves the bounds obtained by Matousek et al." } @@ -57337,18 +57347,18 @@ a simple polygon with vertex set P. We prove that it is NP-complete to find a minimum weight polygon or a maximum weight polygon for a given vertex set, resulting in a proof of NP-completeness for the corresponding area optimization problems. This answers a generalization -of a question stated by Suri in 1989. +of a question stated by Suri in 1989. We give evidence that it is unlikely that the minimization -problem can be approximated. +problem can be approximated. For the maximiation problem, we show that we can find in optimal time O(n log n) a polygon of more than half the area AR(conv(P)) of the convex hull conv(P) of P, yielding a fast 1/2 approximation method for the problem. We demonstrate that it is NP-complete to decide whether there -is a simple polygon of at least (2/3+eps)(AR(conv(P)). +is a simple polygon of at least (2/3+eps)(AR(conv(P)). We also sketch an NP-hardness proof for the problem of finding a minimum-link searating polygon for two finite point sets in the plane. -Finally, we turn to higher dimensions, where we prove that for +Finally, we turn to higher dimensions, where we prove that for 0offsetting or dilating -a polygon). \cgalFootnote{The family of valid types of summands is slightly +a polygon.\cgalFootnote{The family of valid types of summands is slightly broader for certain operations, e.g., a degenerate polygon consisting of line segments is a valid operand for the approximate-offsetting operation.} This package, like the \ref Chapter_2D_Regularized_Boolean_Set-Operations @@ -32,7 +32,7 @@ by the \ref chapterArrangement_on_surface_2 "2D Arrangements" package. The two packages are integrated well to allow mixed operations. For example, it is possible to apply Boolean set operations on objects that are the result of Minkowski sum -computations. \cgalFootnote{The operands of the Minkowski sum operations +computations.\cgalFootnote{The operands of the Minkowski sum operations supported by this package must be (linear) polygons, as opposed to the operands of the Boolean set operations supported by the \ref Chapter_2D_Regularized_Boolean_Set-Operations @@ -51,11 +51,11 @@ edges ordered according to the angle they form with the \f$ x\f$-axis. As the two input polygons are convex, their edges are already sorted by the angle they form with the \f$ x\f$-axis; see the figure above. The Minkowski sum can therefore be computed using an operation similar to the -merge step of the merge-sort algorithm \cgalFootnote{See, for example, +merge step of the merge-sort algorithm\cgalFootnote{See, for example, http://en.wikipedia.org/wiki/Merge_sort.} in \f$ O(m + n)\f$ time, -starting from two bottommost vertices in \f$ P\f$ and in \f$ Q\f$ and merging -the ordered list of edges. +starting from the two bottommost vertices in \f$ P\f$ and in \f$ Q\f$ and +merging the ordered list of edges. \cgalFigureBegin{mink_figonecyc,ms_convex_polygon.png,ms_concave_polygon.png,ms_convolution.png} The convolution of a convex polygon and a non-convex polygon. 
The convolution
@@ -95,11 +95,11 @@ positive orientations (i.e., their boundaries wind in a
counterclockwise order around their interiors).
The convolution of these two polygons \cgalCite{grs-kfcg-83},
denoted \f$ P * Q\f$, is a collection of line segments of the form
-\f$ [p_i + q_j, p_{i+1} + q_j]\f$, \cgalFootnote{Throughout this
+\f$ [p_i + q_j, p_{i+1} + q_j]\f$,\cgalFootnote{Throughout this
chapter, we increment or decrement an index of a vertex modulo the
size of the polygon.} where the vector \f$ {\mathbf{p_i p_{i+1}}}\f$
lies between \f$ {\mathbf{q_{j-1} q_j}}\f$ and
-\f$ {\mathbf{q_j q_{j+1}}}\f$, \cgalFootnote{We say that a vector
+\f$ {\mathbf{q_j q_{j+1}}}\f$,\cgalFootnote{We say that a vector
\f$ {\mathbf v}\f$ lies between two vectors \f$ {\mathbf u}\f$ and
\f$ {\mathbf w}\f$ if we reach \f$ {\mathbf v}\f$ strictly before
reaching \f$ {\mathbf w}\f$ if we move all three vectors to the
@@ -114,7 +114,7 @@
The segments of the convolution form a number of closed (not
necessarily simple) polygonal curves called convolution cycles.
The Minkowski sum \f$ P \oplus Q\f$ is the set of points having a
non-zero winding number with respect to the cycles of
-\f$ P * Q\f$. \cgalFootnote{Informally speaking, the winding number
+\f$ P * Q\f$.\cgalFootnote{Informally speaking, the winding number
of a point \f$ p \in\mathbb{R}^2\f$ with respect to some planar
curve \f$ \gamma\f$ is an integer number counting how many times
does \f$ \gamma\f$ wind in a counterclockwise direction around \f$ p\f$.}
@@ -157,14 +157,29 @@ the sub-sums \f$ S_{ij}\f$ when using the decomposition approach.
As both approaches construct the arrangement of these segments and
extract the sum from this arrangement, computing Minkowski sum
using the convolution approach usually generates a smaller intermediate
-arrangement, hence it is faster and consumes less space. In most cases,
+arrangement; hence it is faster and consumes less space. In most cases,
the reduced convolution method is faster than the full convolution
method, as the respective induced arrangement is usually much smaller.
However, in degenerate cases with many holes in the Minkowski sum, the
full convolution method can be preferable, as it avoids costly
intersection tests.

-\subsection mink_ssecsum_conv Computing Minkowski Sum using Convolutions
+\subsection mink_ssec_hole_filter Filtering Out Holes
+
+If a hole in one polygon is relatively small compared to the other
+polygon, the hole is irrelevant for the computation of
+\f$P\oplus Q \f$ \cgalCite{bfhhm-epsph-15}; thus, the hole
+can be removed (that is, filled up) before the main computation starts.
+Theoretically, we can always fill up all the holes of at least one
+polygon, transforming it into a simple polygon, and still obtain
+exactly the same Minkowski sum. In practice, we remove all holes in one
+polygon whose bounding boxes are, in the \f$x \f$- or \f$y \f$-direction,
+smaller than, or as large as, the bounding box of the other polygon.
+Obliterating holes in the input summands speeds up the computation of
+Minkowski sums, regardless of the approach used to compute the
+Minkowski sum.
+
+\subsection mink_ssec_conv Computing Minkowski Sum using Convolutions

The function template \link minkowski_sum_2()
`minkowski_sum_2(P, Q)`\endlink accepts two polygons
@@ -182,7 +197,8 @@ the function \link minkowski_sum_full_convolution_2()
the \link Polygon_2 `Polygon_2`\endlink class template.
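+
+A minimal, self-contained sketch of the basic `minkowski_sum_2(P, Q)`
+call follows. It is not one of the shipped package examples; the kernel,
+the container defaults, and the input coordinates are merely one
+reasonable choice.
+
+\code{.cpp}
+#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
+#include <CGAL/Polygon_2.h>
+#include <CGAL/Polygon_with_holes_2.h>
+#include <CGAL/minkowski_sum_2.h>
+#include <iostream>
+
+typedef CGAL::Exact_predicates_exact_constructions_kernel  Kernel;
+typedef Kernel::Point_2                                     Point_2;
+typedef CGAL::Polygon_2<Kernel>                             Polygon_2;
+typedef CGAL::Polygon_with_holes_2<Kernel>                  Polygon_with_holes_2;
+
+int main()
+{
+  // Two simple, counterclockwise-oriented summands:
+  // a unit square P and a triangle Q.
+  Polygon_2 P, Q;
+  P.push_back(Point_2(0, 0));  P.push_back(Point_2(1, 0));
+  P.push_back(Point_2(1, 1));  P.push_back(Point_2(0, 1));
+  Q.push_back(Point_2(0, 0));  Q.push_back(Point_2(2, 0));
+  Q.push_back(Point_2(1, 1));
+
+  // The Minkowski sum of two polygons is, in general, a polygon with holes.
+  Polygon_with_holes_2 sum = CGAL::minkowski_sum_2(P, Q);
+
+  std::cout << "Outer boundary has " << sum.outer_boundary().size()
+            << " vertices; number of holes: " << sum.number_of_holes()
+            << std::endl;
+  return 0;
+}
+\endcode
+
+Note that the return type is a polygon with holes even when both summands
+are simple, since the sum of non-convex summands may contain holes.
+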
The types of operands accepted by the function
\link minkowski_sum_reduced_convolution_2()
-`minkowski_sum_reduced_convolution_2(P, Q)`\endlink
+`minkowski_sum_reduced_convolution_2(P, Q)`\endlink (and by the
+function \link minkowski_sum_2() `minkowski_sum_2(P, Q)`\endlink)
are instances of either the \link Polygon_2 `Polygon_2`\endlink or
\link Polygon_with_holes_2 `Polygon_with_holes_2`\endlink class templates.
Even when the input polygons are restricted to be simple polygons, they
@@ -229,7 +245,7 @@ in both summands is large, the decomposition approach runs faster.
In the following we describe how to employ the decomposition-based
Minkowski sum procedure.

-\subsection mink_ssecdecomp Decomposition Strategies
+\subsection mink_ssec_decomp_strategies Decomposition Strategies

-In order to compute Minkowski sums of two polygon \f$ P \f$ and \f$ Q \f$
+In order to compute the Minkowski sum of two polygons \f$ P \f$ and \f$ Q \f$
using the decomposition method, issue the call
@@ -242,7 +258,7 @@ of a type that models the concept
`PolygonWithHolesConvexDecomposition_2`, which refines the concept
`PolygonConvexDecomposition_2`. The same holds for \f$Q \f$. The two
concepts `PolygonConvexDecomposition_2` and
-`PolygonWithHolesConvexDecomposition` refine a `Functor` concept
+`PolygonWithHolesConvexDecomposition_2` refine a `Functor` concept
variant. Namely, they both require the provision of a function
operator (`operator()`). The function operator of the model of the
concept `PolygonConvexDecomposition_2` accepts a planar simple
@@ -251,14 +267,25 @@ polygon, while the function operator of the model of the concept
with holes. Both return a range of convex polygons that represents
the convex decomposition of the input polygon. If the decomposition
strategy that decomposes \f$P \f$ is the same as the strategy that
-decompose \f$Q \f$, you can omit the forth argument, and
-issue the call `minkowski_sum_2(P, Q, decomp)`.
+decomposes \f$Q \f$, you can omit the fourth argument, and
+issue the call `minkowski_sum_2(P, Q, decomp)`, where `decomp`
+is an object that represents the common strategy.
+The class template `Polygon_nop_decomposition_2`, which models the
+concept `PolygonConvexDecomposition_2`, is a trivial convex
+decomposition strategy referred to as the nop strategy; it
+merely passes the input polygon intact to the next stage. Use it
+when you know that the corresponding input polygon is convex to
+begin with. If both \f$P \f$ and \f$Q \f$ are known to be convex,
+you can issue the call `minkowski_sum_2(P, Q, nop)`, where `nop`
+is an object that represents the nop strategy.

The Minkowski-sum package includes four models of the concept
-`PolygonConvexDecomposition_2` and two models of the refined concept
+`PolygonConvexDecomposition_2` (besides the trivial model
+`Polygon_nop_decomposition_2`) and two models of the refined concept
`PolygonWithHolesConvexDecomposition_2` as described below. The first
-three are class templates that wrap the decomposition functions included
-in the \ref Chapter_2D_Polygon "Planar Polygon Partitioning" package.
+three are class templates that wrap the corresponding decomposition
+functions included in the
+\ref Chapter_2D_Polygon "Planar Polygon Partitioning" package.
• The `Optimal_convex_decomposition_2` class template uses
@@ -310,10 +337,7 @@ vertex and having rational-coordinate endpoints on both sides.

The following are two models of the refined concept
-`PolygonWithHolesConvexDecomposition_2`. An instance of any one these
-two types can be used to decompose a polygon with holes. You can pass
-the instance as the third argument to call
-`minkowski_sum_2(P, Q, decomp)` to compute the Minkowski sum of two
-polygons with holes, \f$P \f$ and \f$Q \f$.
+`PolygonWithHolesConvexDecomposition_2`. An instance of any one of these
+two types can be used to decompose a polygon with holes.
• The `Polygon_vertical_decomposition_2` class template uses
vertical decomposition to decompose the underlying arrangement;
@@ -332,6 +356,29 @@ angle-bisector decomposition strategy.

\cgalExample{Minkowski_sum_2/sum_by_decomposition.cpp}

+\subsection mink_ssec_optimal_decomp Optimal Decomposition
+
+Decomposition methods that handle polygons with holes are typically
+more costly than decomposition methods that handle only simple
+polygons. The hole filtration (see \ref mink_ssec_hole_filter) is
+applied before the actual construction starts (be it convolution
+based or decomposition based). The filtration may result in a
+polygon that does not have holes, or even a convex polygon, but this
+is unknown at the time of the call. To this end, we introduce the
+overloaded function template
+`minkowski_sum_by_decomposition_2(P, Q, no_holes_decomp, with_holes_decomp)`,
+where `no_holes_decomp` and `with_holes_decomp` are objects that model
+the concepts `PolygonConvexDecomposition_2` and
+`PolygonWithHolesConvexDecomposition_2`, respectively. If after the
+application of the hole filtration \f$P\f$ remains a polygon with holes,
+then the strategy represented by the object `with_holes_decomp` is
+applied to it. If, however, \f$P\f$ turns into a polygon without holes,
+then the strategy represented by the object `no_holes_decomp` is applied
+to it, unless the result is a convex polygon, in which case the nop
+strategy is applied. If \f$P\f$ is a polygon without holes to start with,
+then only convexity is checked. (Checking whether the result is convex
+incurs a small overhead, though.) The same holds for \f$Q\f$.
+
\section mink_secoffset Offsetting a Polygon

The operation of computing the Minkowski sum \f$ P \oplus B_r\f$ of a
@@ -388,7 +435,7 @@ sub-polygon, and finally calculating the union of these offsets
sub-polygons; see \cgalFigureRef{mink_figpgn_offset} (b). However, as
with the case of the Minkowski sum of a pair of polygons, it is also
more efficient to compute the convolution cycle of the polygon and the
-disc \f$ B_r\f$, \cgalFootnote{As the disc is convex, it is guaranteed
+disc \f$ B_r\f$,\cgalFootnote{As the disc is convex, it is guaranteed
that the convolution curve comprises a single cycle.} which can be
constructed by applying the process described in the previous
paragraph for convex polygons: The only difference is that a circular arc
@@ -399,7 +446,7 @@ the last step consists of computing the winding numbers of the faces
of the arrangement induced by the convolution cycle and discarding the
faces with zero winding numbers.

-\subsection mink_ssecapprox_offset Approximating the Offset with a Guaranteed Error Bound
+\subsection mink_ssec_approx_offset Approximating the Offset with a Guaranteed Error Bound

Let \f$ P \f$ be a counterclockwise-oriented simple polygon all
vertices of which \f$ p_0, \ldots, p_{n-1} \f$ have rational coordinates,
@@ -429,7 +476,7 @@ is supported by the line \f$ Ax + By + C' = 0 \f$, where
rational number. Therefore, the line segments that compose the
offset boundaries cannot be represented as segments of lines with
rational coefficients.
-In Section \ref mink_ssecexact_offset we use the line-pair representation
-to construct the offset polygonin an exact manner using the traits
+In Section \ref mink_ssec_exact_offset we use the line-pair representation
+to construct the offset polygon in an exact manner using the traits
class for conic arcs.
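+
+Putting the decomposition-based interface of
+\ref mink_ssec_decomp_strategies and \ref mink_ssec_optimal_decomp
+together, the following is a hedged sketch rather than one of the
+shipped examples; the choice of kernel, decomposition strategies, and
+header locations is illustrative and may need to be adapted.
+
+\code{.cpp}
+#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
+#include <CGAL/Polygon_with_holes_2.h>
+#include <CGAL/minkowski_sum_2.h>
+#include <CGAL/Polygon_convex_decomposition_2.h>    // Optimal_convex_decomposition_2
+#include <CGAL/Polygon_vertical_decomposition_2.h>  // Polygon_vertical_decomposition_2
+
+typedef CGAL::Exact_predicates_exact_constructions_kernel Kernel;
+typedef CGAL::Polygon_with_holes_2<Kernel>                Polygon_with_holes_2;
+
+// Compute P + Q by decomposition, supplying one strategy for summands
+// that end up without holes and one for summands that keep their holes
+// after the hole filter has been applied.
+Polygon_with_holes_2 sum_by_strategies(const Polygon_with_holes_2& P,
+                                       const Polygon_with_holes_2& Q)
+{
+  CGAL::Optimal_convex_decomposition_2<Kernel>   no_holes_decomp;
+  CGAL::Polygon_vertical_decomposition_2<Kernel> with_holes_decomp;
+  return CGAL::minkowski_sum_by_decomposition_2(P, Q,
+                                                no_holes_decomp,
+                                                with_holes_decomp);
+}
+\endcode
+
+If both summands are simple polygons that should be decomposed with the
+same strategy, the shorter call `minkowski_sum_2(P, Q, decomp)` described
+in \ref mink_ssec_decomp_strategies can be used instead.
+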
    @@ -504,7 +551,7 @@ the header file `bops_circular.h`, which defines the polygon types. \cgalExample{Minkowski_sum_2/approx_offset.cpp} -\subsection mink_ssecexact_offset Computing the Exact Offset +\subsection mink_ssec_exact_offset Computing the Exact Offset As mentioned in the previous section, it is possible to represent offset polygons in an exact manner if the edges of the polygons are @@ -548,7 +595,7 @@ handles polygons with holes, such as the `Polygon_vertical_decomposition_2` class template. \cgalAdvancedEnd -\subsection mink_ssecinner_offset Computing Inner Offsets +\subsection mink_ssec_inner_offset Computing Inner Offsets An operation closely related to the (outer) offset computation, is computing the inner offset of a polygon, or insetting it