interp/0000755000176200001440000000000013605457471011567 5ustar liggesusersinterp/NAMESPACE0000644000176200001440000000302113605104165012767 0ustar liggesusers## load shared library useDynLib(interp) ## exports export("interp") export("interpp") export("franke.fn") export("franke.data") export("locpoly") export("nearest.neighbours") export("tri.mesh") export("plot.triSht") export("print.triSht") export("triangles") export("tri.find") export("arcs") export("circles") export("area") export("voronoi.mosaic") export("print.voronoi") export("plot.voronoi") export("voronoi.area") export("voronoi.findrejectsites") export("voronoi.polygons") export("plot.voronoi.polygons") export("convex.hull") export("left") export("on") export("in.convex.hull") export("on.convex.hull") export("outer.convhull") export("identify.triSht") export("summary.triSht") export("print.summary.triSht") export("summary.voronoi") export("print.summary.voronoi") ## imports importFrom("Rcpp", "evalCpp") importFrom("deldir", "deldir") importFrom("deldir", "triang.list") #importFrom("sp","coordinates") #importFrom("sp","coordinates<-") #importFrom("sp","gridded<-") importFrom("stats", "median") importFrom("graphics", "hist", "plot", "segments", "text", "polygon") importFrom("graphics", "identify", "lines", "plot.new", "plot.window", "points", "title") importFrom("grDevices","heat.colors") ## S3 methods S3method("plot", "triSht") S3method("print", "triSht") S3method("print", "summary.triSht") S3method("summary", "triSht") S3method("identify", "triSht") S3method("plot", "voronoi") S3method("print", "voronoi") S3method("print", "summary.voronoi") S3method("summary", "voronoi") S3method("plot", "voronoi.polygons") interp/README0000644000176200001440000000042213605104165012432 0ustar liggesusersThis package contains a FOSS (re)implementation of the interp* functions from package akima, as well as FOSS version of the basic functions of package tripack. These functions are backward compatible in its arguments. Currently the implementation is only partly finished. interp/ChangeLog0000644000176200001440000002147613605172404013341 0ustar liggesuserscommit 067ca3855957c4c1b92164a296226515d634fb93 Author: Albrecht Gebhardt Date: Tue Jan 7 22:15:22 2020 +0100 add a strict mode (default equal to old behaviour) to in.convex.hull. When switched off, also points on the border are treated as belonging to the hull. commit 2fb7a4b7406ce614c6da71bd4644dd23907368ad Author: Albrecht Gebhardt Date: Tue Jan 7 12:33:56 2020 +0100 fix in.convex.hull (path was not closed before checking!) commit e4b8fc73220c3aab05a9d8eaa7c9a97e2ff49af3 Author: Albrecht Gebhardt Date: Fri May 3 22:46:58 2019 +0200 update ChangeLog commit 3209782a6d30e6e4135e5845eab8673a508009b8 Author: Albrecht Gebhardt Date: Fri May 3 22:44:41 2019 +0200 fix problems revealed by R 3.6, options all and isometric now compatible commit 30383e0838e0c95e69bf43cf660367359a984a8d Author: Albrecht Gebhardt Date: Fri May 3 19:52:40 2019 +0200 * port some additional functions related to voronoi moasics from tripack package. 
* default to isometric plots for voronoi mosaics commit 62a898c289349480b682219f79e1507e8242acb2 Author: Albrecht Gebhardt Date: Sun Mar 25 22:15:03 2018 +0200 update date field commit 3cb0c5dc78b0d1b15f03ef8207069aaa54b5066d Author: Albrecht Gebhardt Date: Sun Mar 25 20:00:16 2018 +0200 changelog updated commit 89a9ac6d92b3fb91a769afb35cb1d06e5e8718a6 Author: Albrecht Gebhardt Date: Sun Mar 25 19:18:56 2018 +0200 fix gcc warnings commit 86e846acab53cc8ebecf43cb4e98c85d533619b8 Author: Albrecht Gebhardt Date: Mon Feb 26 22:20:47 2018 +0100 enable output="points" commit 970190c55f73fc160f806d3f3eec2aa74e4b93b1 Author: Albrecht Gebhardt Date: Tue Jan 16 21:31:29 2018 +0100 finally fix -Wreorder problem commit 32531bc134512722fa131319c6bf8c2534b5861c Author: Albrecht Gebhardt Date: Tue Jan 16 20:21:18 2018 +0100 bump release commit cbe34be5565c5985a2fb61f46206324a0baa6fee Author: Albrecht Gebhardt Date: Thu Jan 11 11:34:17 2018 +0100 fix -Wreorder commit 0ef7a8b700e5f5c92a20b5adda3425444d10b336 Author: Albrecht Gebhardt Date: Sat Jun 17 12:44:05 2017 +0200 next release commit 78f12595350f67f171a5b16d841ca89430a65ecc Author: Albrecht Gebhardt Date: Sat Jun 17 12:43:25 2017 +0200 eliminate second memory problem commit 7ae9e390014fde5ab1ae255b2b39acb240d7bad6 Author: Albrecht Gebhardt Date: Fri Jun 16 19:08:57 2017 +0200 update Changelog commit 445bf2b942453acca1aa5949fef28e0cc1ecb75c Author: Albrecht Gebhardt Date: Fri Jun 16 19:04:36 2017 +0200 * add more details to description * fix index offset by one commit 6b5585a0bce0ec57f5371f1f8fecf2ea70edefee Author: Albrecht Gebhardt Date: Thu Jun 15 17:12:58 2017 +0200 next typo commit 4e4c0c23c49e08f293349c5c38f662e1ef762290 Author: Albrecht Gebhardt Date: Thu Jun 15 17:11:32 2017 +0200 prepare for upload commit cc970d97d907b8e417466be8f59fc7ec95313be4 Author: Albrecht Gebhardt Date: Thu Jun 15 17:07:52 2017 +0200 last help page fixes commit 98bf0a62b7c4bacdba4f366e0cf2c24ea9945ddc Merge: cd4be18 844a6f3 Author: Gebhardt, Albrecht Date: Thu Jun 15 15:07:02 2017 +0200 Merge branch 'master' into 'master' minor edits in help files See merge request !3 commit 844a6f37805e7587bc54b6f98da63c90961e973f Author: Roger Bivand Date: Thu Jun 15 14:39:31 2017 +0200 minor edits in help files commit cd4be18c876f73a31b56db8c3c1e95b0703c9bee Author: Albrecht Gebhardt Date: Thu Jun 15 12:36:43 2017 +0200 some simplifications commit c980d5d29d891948a093cb4912554b4eeaea06e4 Author: Albrecht Gebhardt Date: Mon Jun 12 14:15:35 2017 +0200 last? fixes for tri->triSht renaming commit 02086a6f3cc1d194aebedb4a6073d6263343999e Author: Albrecht Gebhardt Date: Mon Jun 12 13:50:27 2017 +0200 fix return object type commit 18b561bc205fe475691181106427dc6578459bbe Author: Albrecht Gebhardt Date: Mon Jun 12 11:28:32 2017 +0200 split Changelog file commit 0692a43913b394712691453e627bc22d953941e8 Author: Albrecht Gebhardt Date: Mon Jun 12 11:06:17 2017 +0200 rename "tri" objects to "triSht" commit 18c3ed6cb82fdc0c6b11468ac65c148e8bfa47b6 Author: Albrecht Gebhardt Date: Sun Jun 11 21:47:36 2017 +0200 fill help pages a little bit more. 
commit db5ba44bb95fcff8c1dc3fd135d0841982658bea Merge: 5919aa2 919fe42 Author: Albrecht Gebhardt Date: Sat Jun 10 15:15:52 2017 +0200 merge fixes Merge branch 'master' of gitlab.aau.at:agebhard/interp Conflicts: DESCRIPTION man/interp.Rd commit 5919aa297c52b2fff2a983ab14ba9c2f1c52208f Author: Albrecht Gebhardt Date: Sat Jun 10 15:04:40 2017 +0200 several small fixes commit 919fe42da4a1a37d91aea8eee3b95e16ec8b3721 Merge: c2a1954 5549987 Author: Gebhardt, Albrecht Date: Sat Jun 10 13:51:57 2017 +0200 Merge branch 'master' into 'master' Master See merge request !2 commit 55499878c77f29f1526ff3da73786b3a6e46b01a Author: Roger Bivand Date: Mon Jun 5 22:57:36 2017 +0200 tidy commit b0cb35099ea5921fa7b276087260ed48087ac9af Author: Roger Bivand Date: Mon Jun 5 14:59:03 2017 +0200 tidy commit af1e425f715574432587286fbce3fc24ef13ab12 Author: Roger Bivand Date: Mon Jun 5 08:16:32 2017 +0200 register routines commit c2a19542e6d24a32ea4989b875a0d157431f82a6 Author: Gebhardt, Albrecht Date: Mon May 29 17:03:43 2017 +0200 Delete .Rhistory commit 1b5bd75d2b28516255bec574ef6244bff72896a9 Author: Albrecht Gebhardt Date: Mon May 29 17:01:32 2017 +0200 add/extend notes commit b58b79537f782509db59b9ab9dd6d9985493deac Merge: 3c9c5c1 e728813 Author: Gebhardt, Albrecht Date: Mon May 29 14:09:41 2017 +0200 Merge branch 'master' into 'master' change UTF-8 to ASCII and remove CVS folders See merge request !1 commit e728813b55f128a214cddb8a82ecf2645b2b18c2 Merge: ffa5535 3c9c5c1 Author: Roger Bivand Date: Wed May 24 11:24:43 2017 +0200 Merge remote-tracking branch 'upstream/master' commit 3c9c5c123f818fde02ee175586bce7ecd1bd51a8 Merge: 326ae33 036d935 Author: Albrecht Gebhardt Date: Mon May 22 09:45:24 2017 +0200 Merge branch 'master' of gitlab.aau.at:agebhard/interp commit 326ae3371018999026c8f446d15975e4c6ed0ae3 Author: Albrecht Gebhardt Date: Mon May 22 09:43:41 2017 +0200 added commit ffa5535187c8e6e29dbe4786b15b321ac78c1f9c Author: Roger Bivand Date: Fri May 19 12:47:07 2017 +0200 tidy commit 2744175274ed55ad1c7890b7a4785a7a5d941e4b Author: Roger Bivand Date: Thu May 18 19:02:53 2017 +0200 change UTF-8 to ASCII and remove CVS folders commit 036d935da35830d91bd634b32dbcb570620c7325 Author: Albrecht Gebhardt Date: Thu May 18 18:00:53 2017 +0200 added commit cad84cdcc1e77225d2baf1118674061ecf0346e2 Author: Albrecht Gebhardt Date: Tue May 16 10:13:03 2017 +0200 more things for gitignore commit a5405d32a5728fc6295907c55a41d476d19b3b94 Author: Gebhardt, Albrecht Date: Mon May 15 22:54:52 2017 +0200 more \eqn stuff commit 2d80551af5a212c69478a0b59d19d4898fe273a9 Author: Gebhardt, Albrecht Date: Mon May 15 22:48:04 2017 +0200 fix commit dfc70b0917439718bdf8462cac9def88fdeb8216 Author: Gebhardt, Albrecht Date: Mon May 15 22:40:06 2017 +0200 added commit bfd43f6902c3cee146b199b68fe46318f7f56ed3 Author: Gebhardt, Albrecht Date: Mon May 15 22:35:45 2017 +0200 more changes from seafile commit b1e990a04ad0af586b6a0963517a9b755b270124 Author: Gebhardt, Albrecht Date: Mon May 15 22:31:28 2017 +0200 cleanup commit 76d8ec527e372a15c11031261395fd350318adae Author: Gebhardt, Albrecht Date: Mon May 15 22:19:24 2017 +0200 Imported from CVS interp/data/0000755000176200001440000000000013605104165012465 5ustar liggesusersinterp/data/franke.rda0000644000176200001440000000373613605104165014434 0ustar liggesusers [binary RData content of interp/data/franke.rda omitted]
interp/man/0000755000176200001440000000000013605174655012342 5ustar liggesusersinterp/man/locpoly.Rd0000644000176200001440000001557313605104165014312 0ustar liggesusers\name{locpoly} \alias{locpoly} \title{ Local polynomial fit. } \description{ This function performs a local polynomial fit of up to order 3 to bivariate data. It returns estimated values of the regression function as well as estimated partial derivatives up to order 3. } \usage{ locpoly(x, y, z, xo = seq(min(x), max(x), length = nx), yo = seq(min(y), max(y), length = ny), nx = 40, ny = 40, input = "points", output = "grid", h = 0, kernel = "uniform", solver = "QR", degree = 3, pd = "") } \arguments{ \item{x}{ vector of \eqn{x}-coordinates of data points. Missing values are not accepted. } \item{y}{ vector of \eqn{y}-coordinates of data points. Missing values are not accepted. } \item{z}{ vector of \eqn{z}-values at data points. Missing values are not accepted. \code{x}, \code{y}, and \code{z} must be the same length. } \item{xo}{ If \code{output="grid"} (default): sequence of \eqn{x} locations for rectangular output grid, defaults to \code{nx} points between \code{min(x)} and \code{max(x)}. If \code{output="points"}: vector of \eqn{x} locations for output points. } \item{yo}{ If \code{output="grid"} (default): sequence of \eqn{y} locations for rectangular output grid, defaults to \code{ny} points between \code{min(y)} and \code{max(y)}. If \code{output="points"}: vector of \eqn{y} locations for output points. In this case it has to be the same length as \code{xo}. } \item{input}{ text, possible values are \code{"grid"} (not yet implemented) and \code{"points"} (default). This is used to distinguish between regular and irregular gridded data. } \item{output}{ text, possible values are \code{"grid"} (=default) and \code{"points"}. If \code{"grid"} is chosen then \code{xo} and \code{yo} are interpreted as vectors spanning a rectangular grid of points \eqn{(xo[i],yo[j])}, \eqn{i=1,...,nx}, \eqn{j=1,...,ny}. This default behaviour matches how \code{akima::interp} works. In the case of \code{"points"} \code{xo} and \code{yo} have to be of the same length and are taken as possibly irregularly spaced output points \eqn{(xo[i],yo[i])}, \eqn{i=1,...,no} with \code{no=length(xo)}. \code{nx} and \code{ny} are ignored in this case. } \item{nx}{ dimension of output grid in x direction } \item{ny}{ dimension of output grid in y direction } \item{h}{ bandwidth parameter, between 0 and 1. If a scalar is given it is interpreted as a ratio applied to the dataset size to determine a local search neighbourhood; if set to 0 a minimum useful search neighbourhood is chosen (e.g. 10 points for a cubic trend function to determine all 10 parameters). If a vector of length 2 is given both components are interpreted as ratios of the \eqn{x}- and \eqn{y}-range and taken as a global bandwidth. } \item{kernel}{ Text value, implemented kernels are \code{uniform} (default), \code{epanechnikov} and \code{gaussian}. 
} \item{solver}{ Text value, determines used solver in fastLM algorithm used by this code Possible values are \code{LLt}, \code{QR} (default), \code{SVD}, \code{Eigen} and \code{CPivQR} (compare \code{\link[RcppEigen]{fastLm}}). %% FIXME: translate their integer codes to our string values! } \item{degree}{ Integer value, degree of polynomial trend, maximum allowed value is 3. } \item{pd}{ Text value, determines which partial derivative should be returned, possible values are \code{""} (default, the polynomial itself), \code{"x"}, \code{"y"}, \code{"xx"}, \code{"xy"}, \code{"yy"}, \code{"xxx"}, \code{"xxy"}, \code{"xyy"}, \code{"yyy"} or \code{"all"}. } } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{ %% ~Describe the value returned %% If it is a LIST, use If \code{pd="all"}: \item{x }{\eqn{x} coordinates} \item{y }{\eqn{y} coordinates} \item{z }{estimates of \eqn{z}} \item{zx }{estimates of \eqn{dz/dx}} \item{zy }{estimates of \eqn{dz/dy}} \item{zxx }{estimates of \eqn{d^2z/dx^2}} \item{zxy }{estimates of \eqn{d^2z/dxdy}} \item{zyy }{estimates of \eqn{d^2z/dy^2}} \item{zxxx }{estimates of \eqn{d^3z/dx^3}} \item{zxxy }{estimates of \eqn{d^3z/dx^2dy}} \item{zxyy }{estimates of \eqn{d^3z/dxdy^2}} \item{zyyy }{estimates of \eqn{d^3z/dy^3}} If \code{pd!="all"} only the elements \code{x}, \code{y} and the desired derivative will be returned, e.g. \code{zxy} for \code{pd="xy"}. } \references{ Douglas Bates, Dirk Eddelbuettel (2013). Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package. Journal of Statistical Software, 52(5), 1-24. URL http://www.jstatsoft.org/v52/i05/. } \author{ Albrecht Gebhardt , Roger Bivand } \note{ Function \code{\link[KernSmooth]{locpoly}} of package \code{KernSmooth} performs a similar task for univariate data. } \seealso{ \code{\link[KernSmooth]{locpoly}}, \code{\link[RcppEigen]{fastLm}} } \examples{ ## choose a kernel knl <- "gaussian" ## choose global and local bandwidth bwg <- 0.25 # *100% of x- y-range bwl <- 0.1 # *100% of data set ## a bivariate polynomial of degree 5: f <- function(x,y) 0.1+ 0.2*x-0.3*y+0.1*x*y+0.3*x^2*y-0.5*y^2*x+y^3*x^2+0.1*y^5 ## degree of model dg=3 ## part 1: ## regular gridded data: ng<- 21 # x/y size of a square data grid ## build and fill the grid with the theoretical values: xg<-seq(0,1,length=ng) yg<-seq(0,1,length=ng) # xg and yg as matrix matching fg nx <- length(xg) ny <- length(yg) xx <- t(matrix(rep(xg,ny),nx,ny)) yy <- matrix(rep(yg,nx),ny,nx) fg <- outer(xg,yg,f) ## local polynomial estimate ## global bw: ttg <- system.time(pdg <- locpoly(xg,yg,fg, input="grid", pd="all", h=c(bwg,bwg), solver="QR", degree=dg, kernel=knl)) ## time used: ttg ## local bw: ttl <- system.time(pdl <- locpoly(xg,yg,fg, input="grid", pd="all", h=bwl, solver="QR", degree=dg, kernel=knl)) ## time used: ttl image(pdg$x,pdg$y,pdg$z) contour(pdl$x,pdl$y,pdl$zx,add=TRUE,lty="dotted") contour(pdl$x,pdl$y,pdl$zy,add=TRUE,lty="dashed") points(xx,yy,pch=".") ## part 2: ## irregular data, ## results will not be as good as with the regular 21*21=231 points. 
nd<- 41 # size of data set ## random irregular data oldseed <- set.seed(42) x<-runif(ng) y<-runif(ng) set.seed(oldseed) z <- f(x,y) ## global bw: ttg <- system.time(pdg <- interp::locpoly(x,y,z, xg,yg, pd="all", h=c(bwg,bwg), solver="QR", degree=dg,kernel=knl)) ttg ## local bw: ttl <- system.time(pdl <- interp::locpoly(x,y,z, xg,yg, pd="all", h=bwl, solver="QR", degree=dg,kernel=knl)) ttl image(pdg$x,pdg$y,pdg$z) contour(pdl$x,pdl$y,pdl$zx,add=TRUE,lty="dotted") contour(pdl$x,pdl$y,pdl$zy,add=TRUE,lty="dashed") points(x,y,pch=".") } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ models }% use one of RShowDoc("KEYWORDS") \keyword{ regression }% __ONLY ONE__ keyword per line interp/man/area.Rd0000644000176200001440000000155213605104165013531 0ustar liggesusers\name{area} \alias{area} \title{ Extract a list of triangle areas from a triangulation object. } \description{ This function returns a list containing the areas of each triangle of a triangulation object created by \code{tri.mesh}. } \usage{ area(tri.obj) } \arguments{ \item{tri.obj}{ object of class \code{\link{triSht}} } } \details{ This function acesses the \code{cclist} component of a triangulation object returned by \code{\link{tri.mesh}} and extracts the areas of the triangles contained in this triangulation. } \value{ A vector containing the area values. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{triangles}}, \code{\link{arcs}} } \examples{ data(franke) tr <- tri.mesh(franke$ds3) area(tr) } \keyword{ spatial } \keyword{ utilities } interp/man/franke.data.Rd0000644000176200001440000000564213605104165015003 0ustar liggesusers\name{franke.data} \alias{franke.data} \alias{franke.fn} \alias{franke} \title{ Test datasets from Franke for interpolation of scattered data } \description{ \code{franke.data} generates the test datasets from Franke, 1979, see references. } \usage{ franke.data(fn = 1, ds = 1, data) franke.fn(x, y, fn = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{fn}{ function number, from 1 to 5. } \item{x}{'x' value} \item{y}{'y' value} \item{ds}{ data set number, from 1 to 3. Dataset 1 consists of 100 points, dataset 2 of 33 points and dataset 3 of 25 points scattered in the square \eqn{[0,1]\times[0,1]}{[0,1]x[0,1]}. (and partially slightly outside). } \item{data}{ A list of dataframes with 'x' and 'y' to choose from, dataset \code{franke} should be used here. } } \details{ These datasets are mentioned in Akima, (1996) as a testbed for the irregular scattered data interpolator. Franke used the five functions: \deqn{0.75e^{-\frac{(9x-2)^2+(9y-2)^2}{4}}+ 0.75e^{-\frac{(9x+1)^2}{49}-\frac{9y+1}{10}}+ 0.5e^{-\frac{(9x-7)^2+(9y-3)^2}{4}}- 0.2e^{-((9x-4)^2-(9y-7)^2)} }{0.75*exp(-((9*x-2)^2+(9*y-2)^2)/4)+ 0.75*exp(-((9*x+1)^2)/49-(9*y+1)/10)+ 0.5*exp(-((9*x-7)^2+(9*y-3)^2)/4)- 0.2*exp(-(9*x-4)^2-(9*y-7)^2)} \deqn{\frac{\mbox{tanh}(9y-9x)+1}{9}}{(tanh(9*y-9*x)+1)/9} \deqn{\frac{1.25+\cos(5.4y)}{6(1+(3x-1)^2)}}{(1.25+cos(5.4*y))/(6*(1+(3*x-1)^2))} \deqn{e^{-\frac{81((x-0.5)^2+\frac{(y-0.5)^2}{16})}{3}}}{exp(-81*((x-0.5)^2+(y-0.5)^2)/16)/3} \deqn{e^{-\frac{81((x-0.5)^2+\frac{(y-0.5)^2}{4})}{3}}}{exp(-81*((x-0.5)^2+(y-0.5)^2)/4)/3} \deqn{\frac{\sqrt{64-81((x-0.5)^2+(y-0.5)^2)}}{9}-0.5}{sqrt(64-81*((x-0.5)^2+(y-0.5)^2))/9-0.5} and evaluated them on different more or less dense grids over \eqn{[0,1]\times[0,1]}{[0,1]x[0,1]}. 
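For illustration, a minimal sketch evaluating the first test function on a regular grid with \code{franke.fn}; \code{Vectorize} is used here only as a precaution in case \code{franke.fn} accepts scalar arguments only:
library(interp)   ## assumes the interp package is installed
xg <- seq(0, 1, length = 50)
yg <- seq(0, 1, length = 50)
## evaluate Franke's first test function on a 50 x 50 grid
zg <- outer(xg, yg, Vectorize(function(x, y) franke.fn(x, y, fn = 1)))
image(xg, yg, zg)
contour(xg, yg, zg, add = TRUE)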
} \value{ A data frame with components \item{x }{'x' coordinate} \item{y }{'y' coordinate} \item{z }{'z' value} } \note{ The datasets have to be generated via \code{franke.data} before use, the dataset \code{franke} only contains a list of 3 dataframes of 'x' and 'y' coordinates for the above mentioned irregular grids. Do not forget to load the \code{franke} dataset first. The 'x' and 'y' values have been taken from Akima (1996). } \references{ FRANKE, R., (1979). A critical comparison of some methods for interpolation of scattered data. Tech. Rep. NPS-53-79-003, Dept. of Mathematics, Naval Postgraduate School, Monterey, Calif. Akima, H. (1996). Algorithm 761: scattered-data surface fitting that has the accuracy of a cubic polynomial. ACM Transactions on Mathematical Software \bold{22}, 362--371. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{interp}} } \examples{ ## generate Frankes data set for function 2 and dataset 3: data(franke) F23 <- franke.data(2,3,franke) str(F23) } \keyword{ datagen } interp/man/circles.Rd0000644000176200001440000000143213605104165014242 0ustar liggesusers\name{circles} \alias{circles} \title{Add circles to a plot} \description{ This function plots circles at given locations with given radii. } \usage{ circles(x, y, r, ...) } \arguments{ \item{x}{ vector of x coordinates } \item{y}{ vector of y coordinates } \item{r}{ vactor of radii } \item{\dots}{ additional graphic parameters will be passed through } } \note{ This function needs a previous plot where it adds the circles. This function was earlier used in package \code{tripack}. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{lines}}, \code{\link{points}} } \examples{ x<-rnorm(10) y<-rnorm(10) r<-runif(10,0,0.5) plot(x,y, xlim=c(-3,3), ylim=c(-3,3), pch="+") circles(x,y,r) } \keyword{ aplot } interp/man/outer.convhull.Rd0000644000176200001440000000244413605104165015611 0ustar liggesusers\name{outer.convhull} \title{Version of outer which operates only in a convex hull} \usage{outer.convhull(cx,cy,px,py,FUN,duplicate="remove",...) } \alias{outer.convhull} \arguments{ \item{cx}{x cordinates of grid} \item{cy}{y cordinates of grid} \item{px}{vector of x coordinates of points} \item{py}{vector of y coordinates of points} \item{FUN}{function to be evaluated over the grid} \item{duplicate}{indicates what to do with duplicate \eqn{(px_i,py_i)} points, default \code{"remove"}.} \item{...}{additional arguments for \code{FUN}} } \description{This version of \code{outer} evaluates \code{FUN} only on that part of the grid \eqn{cx} times \eqn{cy} that is enclosed within the convex hull of the points \eqn{(px,py)}. This can be useful for spatial estimation if no extrapolation is wanted. } \value{Matrix with values of \code{FUN} (\code{NA}s if outside the convex hull). } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{\code{\link{in.convex.hull}} } \examples{ x<-runif(20) y<-runif(20) z<-runif(20) z.lm<-lm(z~x+y) f.pred<-function(x,y) {predict(z.lm,data.frame(x=as.vector(x),y=as.vector(y)))} xg<-seq(0,1,0.05) yg<-seq(0,1,0.05) image(xg,yg,outer.convhull(xg,yg,x,y,f.pred)) points(x,y) } \keyword{spatial} interp/man/triSht.Rd0000644000176200001440000000462213605104165014077 0ustar liggesusers\name{triSht} \title{A triangulation object} \alias{triSht} \description{ R object that represents the triangulation of a set of 2D points, generated by \code{\link{tri.mesh}}. 
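For orientation, a minimal sketch of inspecting such an object, assuming the components listed below are stored as plain list elements:
library(interp)
data(franke)
tr <- tri.mesh(franke$ds3)   ## build a triangulation
tr$nt                        ## number of triangles
head(tr$trlist)              ## vertex/neighbour/arc indices, one row per triangle
tr$chull                     ## node indices of the convex hull (counterclockwise)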
} \arguments{ \item{n}{Number of nodes} \item{x}{\eqn{x} coordinates of the triangulation nodes} \item{y}{\eqn{y} coordinates of the triangulation nodes} \item{nt}{number of triangles} \item{trlist}{Matrix of indices which defines the triangulation, each row corresponds to a triangle. Columns \code{i1}, \code{i2}, \code{i3} of the row \eqn{i} contain the node indices defining the \eqn{i}th triangle. Columns \code{j1}, \code{j2}, \code{j3} of the row \eqn{i} contain the indices of neighbour triangles (or 0 if no neighbour available along the convex hull). Columns \code{k1}, \code{k2}, \code{k3} of the row \eqn{i} contain the indices of the arcs of the \eqn{i}th triangle as returned by the \code{\link{arcs}} function. } \item{cclist}{ Matrix describing the circumcircles and triangles. Columns \code{x} and \code{y} contain coordinates of the circumcircle centers, \code{r} is the circumcircle radius. \code{area} is the triangle area and \code{ratio} is the ratio of the radius of the inscribed circle to the circumcircle radius. It takes it maximum value 0.5 for an equilateral triangle. The radius of the inscribed circle can be get via \eqn{r_i=\frac{r}{ratio}}. } \item{nchull}{number of points on the convex hull} \item{chull}{ A vector containing the indices of nodes forming the convec hull (in counterclockwise ordering). } \item{narcs}{number of arcs forming the triangulation} \item{arcs}{A matrix with node indices describing the arcs, contains two columns \code{from} and \code{to}. } \item{call}{call, which generated this object} } \note{ This object is not backward compatible with \code{tri} objects generated from package \code{tripack} but the functions and methods are! So you have to regenerate these objects and then you can continue to use the same calls as before. The only difference is that no constraints to the triangulation are possible in package \code{interp}. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{tri.mesh}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}} } \keyword{spatial} interp/man/voronoi.findrejectsites.Rd0000644000176200001440000000173713605104165017505 0ustar liggesusers\name{voronoi.findrejectsites} \title{Find the Voronoi sites at the border of the region (to be rejected).} \author{S. J. Eglen} \usage{voronoi.findrejectsites(voronoi.obj, xmin, xmax, ymin, ymax) } \alias{voronoi.findrejectsites} \arguments{ \item{voronoi.obj}{object of class \code{"voronoi"}} \item{xmin}{minimum x-coordinate of sites in the region} \item{xmax}{maximum x-coordinate of sites in the region} \item{ymin}{minimum y-coordinate of sites in the region} \item{ymax}{maximum y-coordinate of sites in the region} } \description{Find the sites in the Voronoi tesselation that lie at the edge of the region. A site is at the edge if any of the vertices of its Voronoi polygon lie outside the rectangle with corners (xmin,ymin) and (xmax,ymax). } \value{A logical vector of the same length as the number of sites. If the site is a reject, the corresponding element of the vector is set to TRUE.} \seealso{ \code{\link{voronoi.polygons}} } \keyword{spatial} interp/man/interpp.Rd0000644000176200001440000001150713605104165014303 0ustar liggesusers\name{interpp} \alias{interpp} \title{ Pointwise interpolate irregular gridded data } \description{ This function implements bivariate interpolation onto a set of points for irregularly spaced input data. 
This function is meant for backward compatibility to package \code{akima}, please use \code{\link{interp}} with its \code{output} argument set to \code{"points"} now. } \usage{ interpp(x, y = NULL, z, xo, yo = NULL, linear = TRUE, extrap = FALSE, duplicate = "error", dupfun = NULL, deltri = "shull") } \arguments{ \item{x}{ vector of x-coordinates of data points or a \code{SpatialPointsDataFrame} object. Missing values are not accepted. } \item{y}{ vector of y-coordinates of data points. Missing values are not accepted. If left as NULL indicates that \code{x} should be a \code{SpatialPointsDataFrame} and \code{z} names the variable of interest in this dataframe. } \item{z}{ vector of z-coordinates of data points or a character variable naming the variable of interest in the \code{SpatialPointsDataFrame} \code{x}. Missing values are not accepted. \code{x}, \code{y}, and \code{z} must be the same length (execpt if \code{x} is a \code{SpatialPointsDataFrame}) and may contain no fewer than four points. The points of \code{x} and \code{y} cannot be collinear, i.e, they cannot fall on the same line (two vectors \code{x} and \code{y} such that \code{y = ax + b} for some \code{a}, \code{b} will not be accepted). } \item{xo}{ vector of x-coordinates of points at which to evaluate the interpolating function. If \code{x} is a \code{SpatialPointsDataFrame} this has also to be a \code{SpatialPointsDataFrame}. } \item{yo}{ vector of y-coordinates of points at which to evaluate the interpolating function. If operating on \code{SpatialPointsDataFrame}s this is left as \code{NULL} } \item{linear}{logical -- indicating wether linear or spline interpolation should be used. } \item{extrap}{ logical flag: should extrapolation be used outside of the convex hull determined by the data points? Not possible for linear interpolation.} \item{duplicate}{ indicates how to handle duplicate data points. Possible values are \code{"error"} - produces an error message, \code{"strip"} - remove duplicate z values, \code{"mean"},\code{"median"},\code{"user"} - calculate mean , median or user defined function of duplicate z values. } \item{dupfun}{ this function is applied to duplicate points if \code{duplicate="user"} } \item{deltri}{ triangulation method used, this argument will later be moved into a control set together with others related to the spline interpolation! } } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{ a list with 3 components: \item{x,y}{ If \code{output="grid"}: vectors of \eqn{x}- and \eqn{y}-coordinates of output grid, the same as the input argument \code{xo}, or \code{yo}, if present. Otherwise, their default, a vector 40 points evenly spaced over the range of the input \code{x} and \code{y}. If \code{output="points"}: vectors of \eqn{x}- and \eqn{y}-coordinates of output points as given by \code{xo} and \code{yo}. } \item{z}{ If \code{output="grid"}: matrix of fitted \eqn{z}-values. The value \code{z[i,j]} is computed at the point \eqn{(xo[i], yo[j])}. \code{z} has dimensions \code{length(xo)} times \code{length(yo)}. If \code{output="points"}: a vector with the calculated z values for the output points as given by \code{xo} and \code{yo}. If the input was a \code{SpatialPointsDataFrame} a \code{SpatialPixelssDataFrame} is returned for \code{output="grid"} and a \code{SpatialPointsDataFrame} for \code{output="points"}. } } \references{ Moebius, A. F. (1827) Der barymetrische Calcul. Verlag v. 
Johann Ambrosius Barth, Leipzig, https://books.google.at/books?id=eFPluv_UqFEC&hl=de&pg=PR1#v=onepage&q&f=false Franke, R., (1979). A critical comparison of some methods for interpolation of scattered data. Tech. Rep. NPS-53-79-003, Dept. of Mathematics, Naval Postgraduate School, Monterey, Calif. } \author{ Albrecht Gebhardt , Roger Bivand } \note{ This is only a call wrapper meant for backward compatibility, see \code{\link{interp}} for more details! } \seealso{ \code{\link{interp}} } \examples{ ### Use all datasets from Franke, 1979: ### calculate z at shifted original locations. data(franke) for(i in 1:5) for(j in 1:3){ FR <- franke.data(i,j,franke) IL <- with(FR, interpp(x,y,z,x+0.1,y+0.1,linear=TRUE)) str(IL) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ spatial } interp/man/print.voronoi.Rd0000644000176200001440000000076313605104165015452 0ustar liggesusers\name{print.voronoi} \title{Print a voronoi object} \usage{\method{print}{voronoi}(x,...) } \alias{print.voronoi} \arguments{ \item{x}{object of class \code{"voronoi"}} \item{...}{additional paramters for \code{print}} } \description{prints a summary of \code{"x"} } \value{None } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{voronoi}}, \code{\link{plot.voronoi}}, \code{\link{summary.voronoi}} } \keyword{spatial} interp/man/print.summary.triSht.Rd0000644000176200001440000000156113605104165016725 0ustar liggesusers\name{print.summary.triSht} \title{Print a summary of a triangulation object} \usage{\method{print}{summary.triSht}(x, ...)} \alias{print.summary.triSht} \arguments{ \item{x}{object of class \code{"summary.triSht"}, generated by \code{\link{summary.triSht}}.} \item{...}{additional paramters for \code{print}} } \description{ Prints some information about \code{tri.obj} } \value{None } \note{ This function is meant as replacement for the function of same name in package \code{tripack}. The only difference is that no constraints are possible with \code{triSht} objects of package \code{interp}. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}},\code{\link{tri.mesh}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}. } \keyword{spatial} interp/man/on.convex.hull.Rd0000644000176200001440000000370213605174655015513 0ustar liggesusers\name{on.convex.hull} \title{Determines if points are on or in the convex hull of a triangulation object} \usage{ on.convex.hull(tri.obj, x, y, eps=1E-16) in.convex.hull(tri.obj, x, y, eps=1E-16, strict=TRUE) } \alias{on.convex.hull} \alias{in.convex.hull} \arguments{ \item{tri.obj}{object of class \code{\link{triSht}}} \item{x}{vector of \eqn{x}-coordinates of points to locate} \item{y}{vector of \eqn{y}-coordinates of points to locate} \item{eps}{accuracy for checking the condition} \item{strict}{logical, default \code{TRUE}. It indicates if the convex hull is treated as an open (\code{strict=TRUE}) or closed (\code{strict=FALSE}) set. (applies only to \code{in.convex.hull})} } \description{ Given a triangulation object \code{tri.obj} of \eqn{n} points in the plane, this subroutine returns a logical vector indicating if the points \eqn{(x_i,y_i)} lay on or in the convex hull of \code{tri.obj}. } \value{ Logical vector. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}, \code{\link{triangles}}, \code{\link{convex.hull}}. 
} \examples{ # use a part of the quakes data set: data(quakes) quakes.part<-quakes[(quakes[,1]<=-10.78 & quakes[,1]>=-19.4 & quakes[,2]<=182.29 & quakes[,2]>=165.77),] q.tri<-tri.mesh(quakes.part$lon, quakes.part$lat, duplicate="remove") on.convex.hull(q.tri,quakes.part$lon[1:20],quakes.part$lat[1:20]) # Check with part of data set: # Note that points on the hull (see above) get marked FALSE below: in.convex.hull(q.tri,quakes.part$lon[1:20],quakes.part$lat[1:20]) # If points both on the hull and in the interior of the hull are meant # disable strict mode: in.convex.hull(q.tri,quakes.part$lon[1:20],quakes.part$lat[1:20],strict=FALSE) # something completely outside: in.convex.hull(q.tri,c(170,180),c(-20,-10)) } \keyword{spatial} interp/man/on.Rd0000644000176200001440000000322213605104165013231 0ustar liggesusers\name{on} \alias{on} \alias{left} \title{ Determines if a point is on or left of the vector described by two other points. } \description{ A simple test function to determine the position of one (or more) points relative to a vector spanned by two points. } \usage{ on(x1, y1, x2, y2, x0, y0, eps = 1e-16) left(x1, y1, x2, y2, x0, y0, eps = 1e-16) } \arguments{ \item{x1}{ \code{x} coordinate of first point determinig the vector. } \item{y1}{ \code{y} coordinate of first point determinig the vector. } \item{x2}{ \code{x} coordinate of second point determinig the vector. } \item{y2}{ \code{y} coordinate of second point determinig the vector. } \item{x0}{ vector of \code{x} coordinates to locate relative to the vector \eqn{(x_2-x_1, y_2-y_1)}. } \item{y0}{ vector of \code{x} coordinates to locate relative to the vector \eqn{(x_2-x_1, y_2-y_1)}. } \item{eps}{ tolerance for checking if \eqn{x_0,y_0} is on or left of \eqn{(x_2-x_1, y_2-y_1)}, defaults to \eqn{10^{-16}}. } } \value{ logical vector with the results of the test. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{in.convex.hull}}, \code{\link{on.convex.hull}}. } \examples{ y <- x <- c(0,1) ## should be TRUE on(x[1],y[1],x[2],y[2],0.5,0.5) ## note the default setting of eps leading to on(x[1],y[1],x[2],y[2],0.5,0.50000000000000001) ## also be TRUE ## should be TRUE left(x[1],y[1],x[2],y[2],0.5,0.6) ## note the default setting of eps leading to left(x[1],y[1],x[2],y[2],0.5,0.50000000000000001) ## already resulting to FALSE } \keyword{ utilities } interp/man/voronoi.mosaic.Rd0000644000176200001440000000540513605104165015567 0ustar liggesusers\name{voronoi.mosaic} \alias{voronoi.mosaic} \title{ Voronoi mosaic } \description{ This function creates a Voronoi mosaic out of a given set of arbitraryly located points in the plane. Each cell of a voronoi mosaic is associated with a data point and contains all points \eqn{(x,y)} closest to this data point. } \usage{ voronoi.mosaic(x, y = NULL, duplicate = "error") } \arguments{ \item{x}{ vector containing \eqn{x} coordinates of the data. If \code{y} is missing \code{x} should be a list or dataframe with two components \code{x} and \code{y}. \code{x} can also be an object of class \code{\link{triSht}} generated by \code{\link{tri.mesh}}. In this case the internal triangulation step can be skipped. } \item{y}{ vector containing \eqn{y} coordinates of the data. Can be omitted if \code{x} is a list with two components \code{x} and \code{y}. } \item{duplicate}{ flag indicating how to handle duplicate elements. 
Possible values are: \itemize{ \item{ \code{"error"} -- default, } \item{ \code{"strip"} -- remove all duplicate points, } \item{ \code{"remove"} -- leave one point of the duplicate points. } } } } \details{ The function creates first a Delaunay triangulation (if not already given), extracts the circumcircle centers of these triangles, and then connects these points according to the neighbourhood relations between the triangles. } \value{ An object of class \code{\link{voronoi}}. } \references{ G. Voronoi, Nouvelles applications des parametres continus a la theorie des formes quadratiques. Deuxieme memoire. Recherches sur les parallelloedres primitifs, Journal fuer die reine und angewandte Mathematik, 1908, vol 134, p. 198-287 } \author{ Albrecht Gebhardt , Roger Bivand } \note{ This function is meant as a replacement for \code{\link[tripack]{voronoi.mosaic}} from package \code{tripack}. Please note that the underlying triangulation uses a different algorithm, see \code{\link{tri.mesh}}. Contrary to \code{tri.mesh} this should not affect the result for non unique triangulations e.g. on regular grids as the voronoi mosaic in this case will still be unique. The arguments are backward compatible, even the returned object should be compatible with functions from package \code{tripack}. } \seealso{ \code{\link{voronoi}},\code{\link{voronoi.mosaic}}, \code{\link{print.voronoi}}, \code{\link{plot.voronoi}} } \examples{ data(franke) fd <- franke$ds3 vr <- voronoi.mosaic(fd$x, fd$y) summary(vr) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ spatial }% use one of RShowDoc("KEYWORDS") interp/man/nearest.neighbours.Rd0000644000176200001440000000243413605104165016426 0ustar liggesusers\name{nearest.neighbours} \alias{nearest.neighbours} \title{ Nearest neighbour structure for a data set } \description{ This function can be used to generate nearest neighbour information for a set of 2D data points. } \usage{ nearest.neighbours(x, y) } \arguments{ \item{x}{ vector containing \eqn{x} ccordinates of points. } \item{y}{ vector containing \eqn{x} ccordinates of points. } } \details{ The C++ implementation of this function is used inside the \code{\link{locpoly}} and \code{\link{interp}} functions. } \value{ A list with two components \item{index}{ A matrix with one row per data point. Each row contains the indices of the nearest neigbours to the point associated with this row, currently the point itself is also listed in the first row, so this matrix is of dimension \eqn{n} times \eqn{n} (will change to \eqn{n} times \eqn{n-1} later). } \item{dist}{ A matrix containing the distances according to the neigbours listed in component \code{index}. } } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{convex.hull}} } \examples{ data(franke) ## use only a small subset fd <- franke$ds1[1:5,] nearest.neighbours(fd$x,fd$y) } \keyword{ utilities } interp/man/voronoi.polygons.Rd0000644000176200001440000000146213605104165016165 0ustar liggesusers\name{voronoi.polygons} \alias{voronoi.polygons} \title{ extract polygons from a voronoi mosaic } \description{ This functions extracts polygons from a \code{voronoi.mosaic} object. } \usage{ voronoi.polygons(voronoi.obj) } \arguments{ \item{voronoi.obj}{ object of class \code{voronoi.mosaic} } } \value{ Returns an object of class \code{voronoi.polygons} with unamed list elements for each polygon. These list elements are matrices with columns \code{x} and \code{y}. 
Unbounded polygons along the border are represented by \code{NULL} instead of a matrix. } \author{ Denis White } \seealso{ \code{\link{plot.voronoi.polygons}},\code{\link{voronoi.mosaic}}} \examples{ data(franke) fd3 <- franke$ds3 fd3.vm <- voronoi.mosaic(fd3$x,fd3$y) fd3.vp <- voronoi.polygons(fd3.vm) fd3.vp } \keyword{ spatial } interp/man/summary.triSht.Rd0000644000176200001440000000204413605104165015567 0ustar liggesusers\name{summary.triSht} \title{Return a summary of a triangulation object} \usage{\method{summary}{triSht}(object,...) } \alias{summary.triSht} \arguments{ \item{object}{object of class \code{"triSht"}} \item{...}{additional paramters for \code{summary}} } \description{ Returns some information (number of nodes, triangles, arcs) about \code{object}. } \value{An object of class \code{"summary.triSht"}, to be printed by \code{\link{print.summary.triSht}}. It contains the number of nodes (\code{n}), of arcs (\code{na}), of boundary nodes (\code{nb}) and triangles (\code{nt}). } \note{ This function is meant as replacement for the function of same name in package \code{tripack}. The only difference is that no constraints are possible with \code{triSht} objects of package \code{interp}. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{print.summary.triSht}}. } \keyword{spatial} interp/man/voronoi.Rd0000644000176200001440000000270213605104165014312 0ustar liggesusers\name{voronoi} \title{Voronoi object} \alias{voronoi} \arguments{ \item{x,y}{x and y coordinates of nodes of the voronoi mosaic. Each node is a circumcircle center of some triangle from the Delaunay triangulation.} \item{node}{logical vector, indicating real nodes of the voronoi mosaic. These nodes are the centers of circumcircles of triangles with positive area of the delaunay triangulation. If \code{node[i]}=\code{FALSE}, (\code{c[i]},\code{x[i]}) belongs to a triangle with area 0.} \item{n1,n2,n3}{indices of neighbour nodes. Negative indices indicate dummy points as neighbours.} \item{tri}{triangulation object, see \code{\link{triSht}}.} \item{area}{area of triangle \eqn{i}. } \item{ratio}{aspect ratio (inscribed radius/circumradius) of triangle \eqn{i}.} \item{radius}{circumradius of triangle i.} \item{dummy.x,dummy.y}{x and y coordinates of dummy points. They are used for plotting of unbounded tiles.} } \description{ A \code{voronoi} object is created with \code{\link{voronoi.mosaic}} } \note{ This version of \code{voronoi} object is generated from the \code{\link{tri.mesh}} function from package \code{interp}. That's the only difference to \code{voronoi} objects generated with package \code{tripack}. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{voronoi.mosaic}},\code{\link{plot.voronoi}} } \keyword{spatial} interp/man/plot.voronoi.Rd0000644000176200001440000000350613605104165015272 0ustar liggesusers\name{plot.voronoi} \title{Plot a voronoi object} \usage{\method{plot}{voronoi}(x,add=FALSE, xlim=c(min(x$tri$x)- 0.1*diff(range(x$tri$x)), max(x$tri$x)+ 0.1*diff(range(x$tri$x))), ylim=c(min(x$tri$y)- 0.1*diff(range(x$tri$y)), max(x$tri$y)+ 0.1*diff(range(x$tri$y))), all=FALSE, do.points=TRUE, main="Voronoi mosaic", sub=deparse(substitute(x)), isometric=TRUE, ...) 
} \alias{plot.voronoi} \arguments{ \item{x}{object of class \code{"voronoi"}} \item{add}{logical, if \code{TRUE}, add to a current plot.} \item{xlim}{x plot ranges, by default modified to hide dummy points outside of the plot} \item{ylim}{y plot ranges, by default modified to hide dummy points outside of the plot} \item{all}{show all (including dummy points in the plot} \item{do.points}{logical, indicates if points should be plotted.} \item{main}{plot title} \item{sub}{plot subtitle} \item{isometric}{generate an isometric plot (default \code{TRUE})} \item{...}{additional plot parameters} } \description{Plots the mosaic \code{"x"}. Dashed lines are used for outer tiles of the mosaic. } \value{None } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{voronoi}}, \code{\link{print.voronoi}}, \code{\link{summary.voronoi}}, \code{\link{plot.voronoi.polygons}} } \examples{ data(franke) tr <- tri.mesh(franke$ds3) vr <- voronoi.mosaic(tr) plot(tr) plot(vr,add=TRUE) } \keyword{spatial} interp/man/interp-package.Rd0000644000176200001440000000306213605104165015511 0ustar liggesusers\name{interp-package} \alias{interp-package} \docType{package} \title{ Interpolation of data } \description{ Interpolation of \eqn{z} values given regular or irregular gridded data sets containing coordinates \eqn{(x_i,y_i)} and function values \eqn{z_i} is (will be) available through this package. As this interpolation is (for the irregular gridded data case) based on trianglation of the data locations also triangulation functions are implemented. Moreover the (not yet finished) spline interpolation needs estimators for partial derivates, these are also made available to the end user for direct use. } \details{ The interpolation use can be divided by the used method into piecewise linear (finished in 1_0.27) and spline (not yet finished) interpolation and by input and output settings into gridded and pointwise setups. } \note{ This package is a FOSS replacement for the ACM licensed packages \code{akima} and \code{tripack}. The function calls are backward compatible. } \author{ Albrecht Gebhardt , Roger Bivand Maintainer: Albrecht Gebhardt } %\references{ % This optional section can contain literature or other references for % background information. %} \keyword{ package } \seealso{ \code{\link{interp}}, \code{\link{tri.mesh}}, \code{\link{voronoi.mosaic}}, \code{\link{locpoly}} } %\examples{ % \dontrun{ % ## Optional simple examples of the most important functions % ## These can be in \dontrun{} and \donttest{} blocks. % } %} interp/man/plot.voronoi.polygons.Rd0000644000176200001440000000153113605104165017137 0ustar liggesusers\name{plot.voronoi.polygons} \alias{plot.voronoi.polygons} \title{plots an voronoi.polygons object} \description{ plots an \code{voronoi.polygons} object } \usage{ \method{plot}{voronoi.polygons}(x, which, color=TRUE, isometric=TRUE, ...) } \arguments{ \item{x}{ object of class \code{voronoi.polygons} } \item{which}{ index vector selecting which polygons to plot } \item{color}{ logical, determines if plot should be colored, default: \code{TRUE} } \item{isometric}{generate an isometric plot (default \code{TRUE})} \item{\dots}{ additional plot arguments } } \author{ A. 
Gebhardt} \seealso{ \code{\link{voronoi.polygons}}} \examples{ data(franke) fd3 <- franke$ds3 fd3.vm <- voronoi.mosaic(fd3$x,fd3$y) fd3.vp <- voronoi.polygons(fd3.vm) plot(fd3.vp) plot(fd3.vp,which=c(3,4,6,10)) } \keyword{ spatial }% at least one, from doc/KEYWORDS interp/man/tri.find.Rd0000644000176200001440000000371313605104165014337 0ustar liggesusers\name{tri.find} \title{Locate a point in a triangulation} \usage{ tri.find(tri.obj,x,y) } \alias{tri.find} \arguments{ \item{tri.obj}{an triangulation object of class \code{triSht}} \item{x}{x-coordinate of the point} \item{y}{y-coordinate of the point} } \description{ This subroutine locates a point \eqn{P=(x,y)} relative to a triangulation created by \code{tri.mesh}. If \eqn{P} is contained in a triangle, the three vertex indexes are returned. Otherwise, the indexes of the rightmost and leftmost visible boundary nodes are returned. } \value{ A list with elements \code{i1},\code{i2},\code{i3} containing nodal indexes, in counterclockwise order, of the vertices of a triangle containing \eqn{P=(x,y)}. \code{bc} contains the barycentric coordinates of \eqn{P} w.r.t. the found triangle. If \eqn{P} is not contained in the convex hull of the nodes this indices are 0 (\code{bc} is meaningless then). % CHECKME (maybe differs from tripack!): %, \code{i1} indexes the rightmost visible %boundary node, \code{i2} indexes the leftmost visible boundary node, %and \code{i3} = 0. Rightmost and leftmost are defined from the %perspective of \eqn{P}, and a pair of points are visible from each other if %and only if the line segment joining them intersects no triangulation %arc. If \eqn{P} and all of the nodes lie on a common line, then %\code{i1}=\code{i2}=\code{i3} = 0 on output. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}, \code{\link{triangles}}, \code{\link{convex.hull}} } \examples{ data(franke) tr<-tri.mesh(franke$ds3$x,franke$ds3$y) plot(tr) pnt<-list(x=0.3,y=0.4) triangle.with.pnt<-tri.find(tr,pnt$x,pnt$y) attach(triangle.with.pnt) lines(franke$ds3$x[c(i1,i2,i3,i1)],franke$ds3$y[c(i1,i2,i3,i1)],col="red") points(pnt$x,pnt$y) } \keyword{spatial} interp/man/interp.Rd0000644000176200001440000001635213605104165014126 0ustar liggesusers\name{interp} \alias{interp} \title{ Interpolation function } \description{ This function currently implements piecewise linear interpolation (=barycentric interpolation). } \usage{ interp(x, y = NULL, z, xo = seq(min(x), max(x), length = nx), yo = seq(min(y), max(y), length = ny), linear = (method == "linear"), extrap = FALSE, duplicate = "error", dupfun = NULL, nx = 40, ny = 40, input="points", output = "grid", method = "linear", deltri = "shull") } \arguments{ \item{x}{ vector of \eqn{x}-coordinates of data points or a \code{SpatialPointsDataFrame} object. Missing values are not accepted. } \item{y}{ vector of \eqn{y}-coordinates of data points. Missing values are not accepted. If left as NULL indicates that \code{x} should be a \code{SpatialPointsDataFrame} and \code{z} names the variable of interest in this dataframe. } \item{z}{ vector of \eqn{z}-values at data points or a character variable naming the variable of interest in the \code{SpatialPointsDataFrame} \code{x}. Missing values are not accepted. \code{x}, \code{y}, and \code{z} must be the same length (execpt if \code{x} is a \code{SpatialPointsDataFrame}) and may contain no fewer than four points. 
The points of \code{x} and \code{y} should not be collinear, i.e, they should not fall on the same line (two vectors \code{x} and \code{y} such that \code{y = ax + b} for some \code{a}, \code{b} will not produce menaningful results). \code{interp} is meant for cases in which you have \eqn{x}, \eqn{y} values scattered over a plane and a \eqn{z} value for each. If, instead, you are trying to evaluate a mathematical function, or get a graphical interpretation of relationships that can be described by a polynomial, try \code{\link{outer}}. } \item{xo}{ If \code{output="grid"} (default): sequence of \eqn{x} locations for rectangular output grid, defaults to \code{nx} points between \code{min(x)} and \code{max(x)}. If \code{output="points"}: vector of \eqn{x} locations for output points. } \item{yo}{ If \code{output="grid"} (default): sequence of \eqn{y} locations for rectangular output grid, defaults to \code{ny} points between \code{min(y)} and \code{max(y)}. If \code{output="points"}: vector of \eqn{y} locations for output points. In this case it has to be same length as \code{xo}. } \item{input}{ text, possible values are \code{"grid"} (not yet implemented) and \code{"points"} (default). This is used to distinguish between regular and irregular gridded data. } \item{output}{ text, possible values are \code{"grid"} (=default) and \code{"points"}. If \code{"grid"} is choosen then \code{xo} and \code{yo} are interpreted as vectors spanning a rectangular grid of points \eqn{(xo[i],yo[j])}, \eqn{i=1,...,nx}, \eqn{j=1,...,ny}. This default behaviour matches how \code{akima::interp} works. In the case of \code{"points"} \code{xo} and \code{yo} have to be of same length and are taken as possibly irregular spaced output points \eqn{(xo[i],yo[i])}, \eqn{i=1,...,no} with \code{no=length(xo)}. \code{nx} and \code{ny} are ignored in this case. This case is meant as replacement for the pointwise interpolation done by \code{akima::interpp}. If the input \code{x} is a \code{SpatialPointsDataFrame} and \code{output="points"} then \code{xo} has to be a \code{SpatialPointsDataFrame}, \code{yo} will be ignored. } \item{linear}{ logical, only for backward compatibility with \code{akima::interp}, indicates if piecewise linear interpolation or Akima splines should be used. Warning: in this release only \code{linear=TRUE} is implemented! Please use the new \code{method} argument instead! } \item{method}{ text, possible methods are (currently only, more is under developement) \code{"linear"} (piecewise linear interpolation within the triangles of the Delauney triangulation, also referred to as barycentric interpolation based on barycentric coordinates). This replaces the old \code{linear} argument of \code{akima::interp}. } \item{extrap}{ logical, indicates if extrapolation outside the convex hull is intended, will not work for piecewise linear interpolation! } \item{duplicate}{ character string indicating how to handle duplicate data points. 
Possible values are \describe{ \item{\code{"error"}}{produces an error message,} \item{\code{"strip"}}{remove duplicate z values,} \item{\code{"mean"},\code{"median"},\code{"user"}}{calculate mean , median or user defined function (\code{dupfun}) of duplicate \eqn{z} values.} } } \item{dupfun}{ a function, applied to duplicate points if \code{duplicate= "user"}.} \item{nx}{ dimension of output grid in x direction } \item{ny}{ dimension of output grid in y direction } \item{deltri}{ triangulation method used, this argument will later be moved into a control set together with others related to the spline interpolation! Possible values are \code{"shull"} (default, sweep hull algorithm) and \code{"deldir"} (uses package\code{deldir}). } } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{ a list with 3 components: \item{x,y}{ If \code{output="grid"}: vectors of \eqn{x}- and \eqn{y}-coordinates of output grid, the same as the input argument \code{xo}, or \code{yo}, if present. Otherwise, their default, a vector 40 points evenly spaced over the range of the input \code{x} and \code{y}. If \code{output="points"}: vectors of \eqn{x}- and \eqn{y}-coordinates of output points as given by \code{xo} and \code{yo}. } \item{z}{ If \code{output="grid"}: matrix of fitted \eqn{z}-values. The value \code{z[i,j]} is computed at the point \eqn{(xo[i], yo[j])}. \code{z} has dimensions \code{length(xo)} times \code{length(yo)}. If \code{output="points"}: a vector with the calculated z values for the output points as given by \code{xo} and \code{yo}. If the input was a \code{SpatialPointsDataFrame} a \code{SpatialPixelsDataFrame} is returned for \code{output="grid"} and a \code{SpatialPointsDataFrame} for \code{output="points"}. } } \references{ Moebius, A. F. (1827) Der barymetrische Calcul. Verlag v. Johann Ambrosius Barth, Leipzig, https://books.google.at/books?id=eFPluv_UqFEC&hl=de&pg=PR1#v=onepage&q&f=false Franke, R., (1979). A critical comparison of some methods for interpolation of scattered data. Tech. Rep. NPS-53-79-003, Dept. of Mathematics, Naval Postgraduate School, Monterey, Calif. } \author{ Albrecht Gebhardt , Roger Bivand } %\note{ %% ~~further notes~~ %} \seealso{ \code{\link{interpp}} } \examples{ ### Use all datasets from Franke, 1979: data(franke) for(i in 1:5) for(j in 1:3){ FR <- franke.data(i,j,franke) IL <- with(FR, interp(x,y,z,method="linear")) image(IL) contour(IL,add=TRUE) with(FR,points(x,y)) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ dplot } \keyword{ math } interp/man/triangles.Rd0000644000176200001440000000300213605104165014601 0ustar liggesusers\name{triangles} \alias{triangles} \title{Extract a list of triangles from a triangulation object} \usage{triangles(tri.obj) } \arguments{ \item{tri.obj}{object of class \code{\link{triSht}}} } \description{ This function extracts a list of triangles from an triangulation object created by \code{tri.mesh}. } \details{ The vertices in the returned matrix (let's denote it with \code{retval}) are ordered counterclockwise. The columns \code{tr}\eqn{x} and \code{arc}\eqn{x}, \eqn{x=1,2,3} index the triangle and arc, respectively, which are opposite (not shared by) node \code{node}\eqn{x}, with \code{tri}\eqn{x=0} if \code{arc}\eqn{x} indexes a boundary arc. Vertex indexes range from 1 to \eqn{n}, the number of nodes, triangle indexes from 0 to \eqn{nt}, and arc indexes from 1 to \eqn{na = nt+n-1}. 
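For illustration, a minimal sketch that uses the first three columns (the vertex node indices) to draw every triangle with base graphics:
library(interp)
data(franke)
tr <- tri.mesh(franke$ds3$x, franke$ds3$y)
tl <- triangles(tr)          ## one row per triangle
plot(franke$ds3$x, franke$ds3$y, pch = 20)
## look up the three vertex nodes of each triangle and draw its outline
for (i in seq_len(nrow(tl)))
  polygon(franke$ds3$x[tl[i, 1:3]], franke$ds3$y[tl[i, 1:3]], border = "grey")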
} \value{ A matrix with columns \code{node1}, \code{node2}, \code{node3}, representing the vertex nodal indexes, \code{tr1}, \code{tr2}, \code{tr3}, representing neighboring triangle indexes and \code{arc1}, \code{arc2}, \code{arc3} reresenting arc indexes. Each row represents one triangle. } %\references{ %} \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}, \code{\link{triangles}} } \examples{ # use the smallest Franke data set data(franke) fr3.tr<-tri.mesh(franke$ds3$x, franke$ds3$y) triangles(fr3.tr) } \keyword{spatial} interp/man/voronoi.area.Rd0000644000176200001440000000133313605104165015220 0ustar liggesusers\name{voronoi.area} \title{Calculate area of Voronoi polygons} \author{S. J. Eglen} \usage{voronoi.area(voronoi.obj) } \alias{voronoi.area} \arguments{ \item{voronoi.obj}{object of class \code{"voronoi"}} } \description{Computes the area of each Voronoi polygon. For some sites at the edge of the region, the Voronoi polygon is not bounded, and so the area of those sites cannot be calculated, and hence will be \code{NA}. } \value{A vector of polygon areas.} \seealso{ \code{\link{voronoi.mosaic}},\code{\link{voronoi.polygons}}, } \keyword{spatial} \examples{ data(franke) fd3 <- franke$ds3 fd3.vm <- voronoi.mosaic(fd3$x,fd3$y) fd3.vm.areas <- voronoi.area(fd3.vm) plot(fd3.vm) text(fd3$x, fd3$y, round(fd3.vm.areas,5)) } interp/man/convex.hull.Rd0000644000176200001440000000272313605104165015067 0ustar liggesusers\name{convex.hull} \title{Return the convex hull of a triangulation object} \usage{convex.hull(tri.obj, plot.it=FALSE, add=FALSE,...) } \alias{convex.hull} \arguments{ \item{tri.obj}{object of class \code{\link{triSht}}} \item{plot.it}{logical, if \code{TRUE} the convex hull of \code{tri.obj} will be plotted.} \item{add}{logical. if \code{TRUE} (and \code{plot.it=TRUE}), add to a current plot.} \item{...}{additional plot arguments} } \description{ Given a triangulation \code{tri.obj} of \eqn{n} points in the plane, this subroutine returns two vectors containing the coordinates of the nodes on the boundary of the convex hull. } \value{ \item{x}{x coordinates of boundary nodes.} \item{y}{y coordinates of boundary nodes.} } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}, \code{\link{triangles}}. } \examples{ ## random points: rand.tr<-tri.mesh(runif(10),runif(10)) plot(rand.tr) rand.ch<-convex.hull(rand.tr, plot.it=TRUE, add=TRUE, col="red") ## use a part of the quakes data set: data(quakes) quakes.part<-quakes[(quakes[,1]<=-17 & quakes[,1]>=-19.0 & quakes[,2]<=182.0 & quakes[,2]>=180.0),] quakes.tri<-tri.mesh(quakes.part$lon, quakes.part$lat, duplicate="remove") plot(quakes.tri) convex.hull(quakes.tri, plot.it=TRUE, add=TRUE, col="red") } \keyword{spatial} interp/man/arcs.Rd0000644000176200001440000000167213605104165013554 0ustar liggesusers\name{arcs} \alias{arcs} \title{ Extract a list of arcs from a triangulation object. } \description{ This function extracts a list of arcs from a triangulation object created by \code{tri.mesh}. } \usage{ arcs(tri.obj) } \arguments{ \item{tri.obj}{ object of class \code{\link{triSht}} } } \details{ This function acesses the \code{arcs} component of a triangulation object returned by \code{\link{tri.mesh}} and extracts the arcs contained in this triangulation. This is e.g. used for plotting. 
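For illustration, a minimal sketch of such a plot (assuming the \code{"from"} and \code{"to"} columns described in the value section below are accessible by name):
\preformatted{
data(franke)
tr <- tri.mesh(franke$ds3)
e  <- arcs(tr)
plot(franke$ds3$x, franke$ds3$y)
segments(franke$ds3$x[e[, "from"]], franke$ds3$y[e[, "from"]],
         franke$ds3$x[e[, "to"]],   franke$ds3$y[e[, "to"]])
}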
} \value{ A matrix with two columns \code{"from"} and \code{"to"} containing the indices of points connected by the arc with the corresponding row index. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{triangles}}, \code{\link{area}} } \examples{ data(franke) tr <- tri.mesh(franke$ds3) arcs(tr) } \keyword{ spatial } \keyword{ dplot } interp/man/identify.tri.Rd0000644000176200001440000000145413605104165015232 0ustar liggesusers\name{identify.triSht} \title{Identify points in a triangulation plot} \usage{\method{identify}{triSht}(x,...) } \alias{identify.triSht} \arguments{ \item{x}{object of class \code{\link{triSht}}} \item{...}{additional paramters for \code{identify}} } \description{Identify points in a plot of \code{"x"} with its coordinates. The plot of \code{"x"} must be generated with \code{plot.tri}. } \value{an integer vector containing the indexes of the identified points. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}} } \examples{ \dontrun{ data(franke) tr <- tri.mesh(franke$ds3$x, franke$ds3$y) plot(tr) identify(tr) } } \keyword{spatial} interp/man/print.summary.voronoi.Rd0000644000176200001440000000146313605104165017144 0ustar liggesusers\name{print.summary.voronoi} \title{Print a summary of a voronoi object} \usage{\method{print}{summary.voronoi}(x, ...)} \alias{print.summary.voronoi} \arguments{ \item{x}{object of class \code{"summary.voronoi"}, generated by \code{\link{summary.voronoi}}.} \item{...}{additional paramters for \code{print}} } \description{ Prints some information about object \code{x} } \value{None } \note{ This function is meant as replacement for the function of same name in package \code{tripack} and should be fully backward compatible. } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{voronoi}},\code{\link{voronoi.mosaic}}, \code{\link{print.voronoi}}, \code{\link{plot.voronoi}}, \code{\link{summary.voronoi}}. } \keyword{spatial} interp/man/tri.mesh.Rd0000644000176200001440000001135413605104165014353 0ustar liggesusers\name{tri.mesh} \alias{tri.mesh} \title{ Delaunay triangulation } \description{ This function generates a Delaunay triangulation of arbitrarily distributed points in the plane. The resulting object can be printed or plotted, some additional functions can extract details from it like the list of triangles, arcs or the convex hull. } \usage{ tri.mesh(x, y = NULL, duplicate = "error") } \arguments{ \item{x}{ vector containing \eqn{x} coordinates of the data. If \code{y} is missing \code{x} should be a list or dataframe with two components \code{x} and \code{y}. } \item{y}{ vector containing \eqn{y} coordinates of the data. Can be omitted if \code{x} is a list with two components \code{x} and \code{y}. } \item{duplicate}{ flag indicating how to handle duplicate elements. Possible values are: \itemize{ \item{ \code{"error"} -- default, } \item{ \code{"strip"} -- remove all duplicate points, } \item{ \code{"remove"} -- leave one point of the duplicate points. } } } } \details{ This function creates a Delaunay triangulation of a set of arbitrarily distributed points in the plane referred to as nodes. The Delaunay triangulation is defined as a set of triangles with the following five properties: \enumerate{ \item The triangle vertices are nodes. \item No triangle contains a node other than its vertices. \item The interiors of the triangles are pairwise disjoint. 
\item The union of the triangles is the convex hull of the set of nodes (the smallest convex set which contains the nodes).
\item The interior of the circumcircle of each triangle contains no node.
}
The first four properties define a triangulation, and the last property results in a triangulation which is as close as possible to equiangular in a certain sense and which is uniquely defined unless four or more nodes lie on a common circle. This property makes the triangulation well-suited for solving closest point problems and for triangle-based interpolation.

This triangulation is based on the s-hull algorithm by David Sinclair. It consists of two steps:
\enumerate{
\item{ Create an initial non-overlapping triangulation from the radially sorted nodes (w.r.t. an arbitrary first node). Starting from a first triangle built from the first node and its nearest neighbours, this is done by adding triangles that join the next node (in the sense of distance to the first node) to those edges of the hull of the current triangulation which are visible from this node (sweep hull step). }
\item{ Apply triangle flipping to each pair of triangles sharing a border until condition 5 holds (Cline-Renka test). }
}
This algorithm has complexity \eqn{O(n*log(n))}.
}
\value{ an object of class \code{"triSht"}, see \code{\link{triSht}}. }
\references{
B. Delaunay, Sur la sphere vide. A la memoire de Georges Voronoi, Bulletin de l'Academie des Sciences de l'URSS. Classe des sciences mathematiques et naturelles, 1934, no. 6, p. 793--800

D. A. Sinclair, S-Hull: A Fast Radial Sweep-Hull Routine for Delaunay Triangulation. https://arxiv.org/pdf/1604.01428.pdf, 2016.
}
\author{ Albrecht Gebhardt, Roger Bivand }
\note{ This function is meant as a replacement for \code{\link[tripack]{tri.mesh}} from package \code{tripack}. Please note that the underlying algorithm changed from Renka's method to Sinclair's sweep hull method. Delaunay triangulations are unique if no four or more points exist which share the same circumcircle. Otherwise several solutions are available and different algorithms will give different results. This especially holds for regular grids, where in the case of rectangularly gridded points each grid cell can be triangulated in two different ways.

The arguments are backward compatible, but the returned object is not compatible with package \code{tripack} (which provides a \code{\link[tripack]{tri}} object type)! But you can apply methods with the same names to the object returned by package \code{interp}, which is of type \code{\link{triSht}}, so you can reuse your old code but you cannot reuse your old saved workspace. }
\seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}}, \code{\link{triangles}}, \code{\link{convex.hull}}, \code{\link{arcs}}. }
\examples{
## use Franke's datasets:
data(franke)
tr1 <- tri.mesh(franke$ds3$x, franke$ds3$y)
tr1
tr2 <- tri.mesh(franke$ds2)
summary(tr2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ spatial }
interp/man/print.triSht.Rd0000644000176200001440000000077113605104165015233 0ustar liggesusers\name{print.triSht}
\title{Print a triangulation object}
\usage{\method{print}{triSht}(x,...)
}
\alias{print.triSht}
\arguments{
\item{x}{object of class \code{"triSht"}}
\item{...}{additional parameters for \code{print}}
}
\description{prints an adjacency list of \code{"x"} }
\value{None }
\author{ Albrecht Gebhardt, Roger Bivand }
\seealso{ \code{\link{triSht}}, \code{\link{plot.triSht}}, \code{\link{summary.triSht}} }
\keyword{spatial}
interp/man/summary.voronoi.Rd0000644000176200001440000000155213605104165016010 0ustar liggesusers\name{summary.voronoi}
\title{Return a summary of a voronoi object}
\usage{\method{summary}{voronoi}(object,...) }
\alias{summary.voronoi}
\arguments{
\item{object}{object of class \code{"voronoi"}}
\item{...}{additional parameters for \code{summary}}
}
\description{ Returns some information about \code{object} }
\value{Object of class \code{"summary.voronoi"}. It contains the number of nodes (\code{nn}) and dummy nodes (\code{nd}). }
\note{ This function is meant as a replacement for the function of the same name in package \code{tripack} and should be fully backward compatible. }
\author{ Albrecht Gebhardt, Roger Bivand }
\seealso{ \code{\link{voronoi}},\code{\link{voronoi.mosaic}}, \code{\link{print.voronoi}}, \code{\link{plot.voronoi}}, \code{\link{print.summary.voronoi}}. }
\keyword{spatial}
interp/man/plot.triSht.Rd0000644000176200001440000000342113605104165015050 0ustar liggesusers\name{plot.triSht}
\title{Plot a triangulation object}
\usage{\method{plot}{triSht}(x, add = FALSE, xlim = range(x$x), ylim = range(x$y), do.points = TRUE, do.labels = FALSE, isometric = TRUE, do.circumcircles = FALSE, segment.lty = "dashed", circle.lty = "dotted", ...) }
\alias{plot.triSht}
\arguments{
\item{x}{object of class \code{"triSht"}}
\item{add}{logical, if \code{TRUE}, add to a current plot.}
\item{do.points}{logical, indicates if points should be plotted. (default \code{TRUE})}
\item{do.labels}{logical, indicates if points should be labelled.
(default \code{FALSE})} \item{xlim,ylim}{x/y ranges for plot} \item{isometric}{generate an isometric plot (default \code{TRUE})} \item{do.circumcircles}{logical, indicates if circumcircles should be plotted (default \code{FALSE})} \item{segment.lty}{line type for triangulation segments} \item{circle.lty}{line type for circumcircles} \item{...}{additional plot parameters} } \description{plots the triangulation object \code{"x"} } \value{None } \author{ Albrecht Gebhardt , Roger Bivand } \seealso{ \code{\link{triSht}}, \code{\link{print.triSht}}, \code{\link{summary.triSht}} } \examples{ ## random points plot(tri.mesh(rpois(100,lambda=20),rpois(100,lambda=20),duplicate="remove")) ## use a part of the quakes data set: data(quakes) quakes.part<-quakes[(quakes[,1]<=-10.78 & quakes[,1]>=-19.4 & quakes[,2]<=182.29 & quakes[,2]>=165.77),] quakes.tri<-tri.mesh(quakes.part$lon, quakes.part$lat, duplicate="remove") plot(quakes.tri) ## use the whole quakes data set ## (will not work with standard memory settings, hence commented out) ## plot(tri.mesh(quakes$lon, quakes$lat, duplicate="remove"), do.points=F) } \keyword{spatial} interp/DESCRIPTION0000644000176200001440000000415313605457471013300 0ustar liggesusersPackage: interp Type: Package Title: Interpolation Methods Version: 1.0-33 Date: 2020-01-07 Authors@R: c(person("Albrecht", "Gebhardt", role = c("aut", "cre", "cph"), email = "albrecht.gebhardt@aau.at", comment = "..."), person("Roger", "Bivand", role = c("aut"), email = "Roger.Bivand@nhh.no"), person("David", "Sinclair", role = c("aut","cph"), email = "david@s-hull.org")) Maintainer: Albrecht Gebhardt Description: Bivariate data interpolation on regular and irregular grids, either linear or using splines are the main part of this package. It is intended to provide FOSS replacement functions for the ACM licensed akima::interp and tripack::tri.mesh functions. Currently the piecewise linear interpolation part of akima::interp (and also akima::interpp) is implemented in interp::interp, this corresponds to the call akima::interp(..., linear=TRUE) which is the default setting and covers most of akima::interp use cases in depending packages. A re-implementation of Akimas spline interpolation (akima::interp(..., linear=FALSE)) is currently under development and will complete this package in a later version. Estimators for partial derivatives are already available, these are a prerequisite for the spline interpolation. The basic part is currently a GPLed triangulation algorithm (sweep hull algorithm by David Sinclair) providing the starting point for the piecewise linear interpolator. As side effect this algorithm is also used to provide replacements for the basic functions of the tripack package which also suffer from the ACM restrictions. All functions are designed to be backward compatible with their akima / tripack counterparts. 
License: GPL (>= 2) Imports: Rcpp (>= 0.12.9), deldir Suggests: sp, Deriv LinkingTo: Rcpp, RcppEigen NeedsCompilation: yes Packaged: 2020-01-07 21:35:45 UTC; agebhard Author: Albrecht Gebhardt [aut, cre, cph] (...), Roger Bivand [aut], David Sinclair [aut, cph] Repository: CRAN Date/Publication: 2020-01-08 23:01:13 UTC interp/src/0000755000176200001440000000000013605174661012353 5ustar liggesusersinterp/src/interp.h0000644000176200001440000000522413605104165014020 0ustar liggesusers#include #include #include "s_hull_pro.h" // [[Rcpp::depends(RcppEigen)]] #include using namespace Rcpp; using Eigen::MatrixXi; using Eigen::MatrixXd; using Eigen::VectorXd; using Eigen::ArrayXd; using Eigen::LLT; using Eigen::Lower; using Eigen::Map; using Eigen::Upper; using Eigen::HouseholderQR; using Rcpp::as; typedef Map MapMatd; typedef Map MapMati; typedef Map MapVecd; typedef Eigen::ColPivHouseholderQR CPivQR; typedef CPivQR::PermutationType Permutation; typedef struct triang{ int nT; // indices of points std::vector i1; std::vector i2; std::vector i3; // indices of neighbour triangles std::vector j1; std::vector j2; std::vector j3; // circumcircle data std::vector xc; std::vector yc; std::vector rc; // triangle area and ratio (ir/ccr) std::vector ar; std::vector rt; // convex hull std::vector ch; int nch; // arcs, from to node indices std::vector a1; std::vector a2; // triangles to arcs indices std::vector k1; std::vector k2; std::vector k3; int na; } Triang; typedef Eigen::Matrix< int , Eigen::Dynamic, 1> VectorXi; typedef struct edges{ int nE; VectorXi i1; VectorXi i2; VectorXi t1; VectorXi t2; MatrixXd xB; MatrixXd yB; MatrixXd zBl; MatrixXd zBr; } Edges; typedef struct nn{ MatrixXi ind; MatrixXd dist; } NN; typedef struct cc{ float xc; float yc; float rc; float ar; } CC; typedef struct pdest{ VectorXd betahat; VectorXd est; VectorXd se; double cond; } PDEst; #define EIGEN_INITIALIZE_MATRICES_BY_NAN 1 #define EIGEN_USE_BLAS 1 MatrixXd AtA(MatrixXd A); double threshold(); ArrayXd Dplus(const ArrayXd& d); double kern2d(double x, double xi, double hx, double y, double yi, double hy, std::string kernel); PDEst pD(NumericVector xD, NumericVector yD, NumericVector zD, NN nn, double x, double y, CharacterVector kernel, NumericVector h, std::string solver, int degree); PDEst pDsmooth(NumericVector xD, NumericVector yD, NumericVector zD, NN nn, double x, double y, CharacterVector kernel, NumericVector h, std::string solver, int degree, int n, bool akimaweight); triang shDt(std::vector x, std::vector y); NN nN(NumericVector x, NumericVector y); NN nN(VectorXd x, VectorXd y); NN extendNN(NN nn, NumericVector X, NumericVector Y, NumericVector x, NumericVector y); CC circum(double r1,double c1, double r2,double c2, double r3,double c3); VectorXd myDnorm(VectorXd x, double mu, double sd); interp/src/common.cpp0000644000176200001440000000241013605104165014334 0ustar liggesusers#include "interp.h" MatrixXd AtA(MatrixXd A) { int n(A.cols()); return MatrixXd(n,n).setZero().selfadjointView() .rankUpdate(A.adjoint()); } double threshold(){ return 1.0E6; // ???? FIXME } // see https://cran.r-project.org/web/packages/RcppEigen/vignettes/RcppEigen-Introduction.pdf, Fig. 9: ArrayXd Dplus(const ArrayXd& d) { ArrayXd di(d.size()); double comp(d.maxCoeff() * threshold()); for (int j = 0; j < d.size(); ++j) di[j] = (d[j] < comp) ? 0. 
: 1./d[j]; return di; } double kern2d(double x, double xi, double hx, double y, double yi, double hy, std::string kernel){ // implement product kernels double t1, t2, k; if(kernel=="gaussian"){ // hx is interpreted as 3*sx ... so hx=hx/3.0; hy=hy/3.0; } t1=(x-xi)/hx; t2=(y-yi)/hy; //Rcout << "t1: " << t1 << " t2: " << t2; if(kernel=="gaussian") k=1.0/(2.0*M_PI)*exp(-0.5*(t1*t1+t2*t2)); else if(kernel=="epanechnikov"){ if((abs(t1)<=1.0) & (abs(t2)<=1.0)) k=81.0/256.0*(1-t1*t1)*(1-t1*t1)*(1-t2*t2)*(1-t2*t2); else k=0.0; } else if(kernel=="uniform"){ if((abs(t1)<=1.0) & (abs(t2)<=1.0)) k=0.25; else k=0.0; } else Rf_error("kernel not implemented!"); //Rcout << " k: " << k << std::endl; return k; } interp/src/init.c0000644000176200001440000000371213605151405013454 0ustar liggesusers#include #include #include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. */ /* .Call calls */ extern SEXP interp_inHull(SEXP, SEXP, SEXP, SEXP); extern SEXP interp_interpDeltri(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_interpShull(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_left(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_nearestNeighbours(SEXP, SEXP); extern SEXP interp_on(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_onHull(SEXP, SEXP, SEXP, SEXP); extern SEXP interp_partDerivGrid(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_partDerivPoints(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP interp_shullDeltri(SEXP, SEXP); extern SEXP interp_triFind(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); static const R_CallMethodDef CallEntries[] = { {"interp_inHull", (DL_FUNC) &interp_inHull, 4}, {"interp_interpDeltri", (DL_FUNC) &interp_interpDeltri, 6}, {"interp_interpShull", (DL_FUNC) &interp_interpShull, 8}, {"interp_left", (DL_FUNC) &interp_left, 7}, {"interp_nearestNeighbours", (DL_FUNC) &interp_nearestNeighbours, 2}, {"interp_on", (DL_FUNC) &interp_on, 7}, {"interp_onHull", (DL_FUNC) &interp_onHull, 4}, {"interp_partDerivGrid", (DL_FUNC) &interp_partDerivGrid, 12}, {"interp_partDerivPoints", (DL_FUNC) &interp_partDerivPoints, 12}, {"interp_shullDeltri", (DL_FUNC) &interp_shullDeltri, 2}, {"interp_triFind", (DL_FUNC) &interp_triFind, 8}, {NULL, NULL, 0} }; void R_init_interp(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } interp/src/partDeriv.cpp0000644000176200001440000007021113605104165015010 0ustar liggesusers #include "interp.h" // [[Rcpp::export(name="locpoly.partderiv.grid")]] List partDerivGrid(NumericVector x, NumericVector y, NumericVector xD, NumericVector yD, NumericVector zD, CharacterVector kernel="gaussian", NumericVector h=NumericVector::create(0.25,0.25), CharacterVector solver="QR", int degree=3, bool smoothpde=false, bool akimaweight=false, int nweight=25) { // Estimate up to third order partial derivatives at x,y locations: // apply local polynomial regression of order up to 3 List ret; int nD = xD.size(); int nG = x.size(); int mG = y.size(); int p=0; if(degree==0) p=1; // local constant trend if(degree==1) p=3; // local linear trend else if(degree==2) p=6; // local quadratic trend else if(degree==3) p=10; // local cubic trend else if(degree>3) Rf_error("degree>3 !"); // initialize return matrices NumericMatrix Ze = NumericMatrix(nG,mG); NumericMatrix Zx = NumericMatrix(nG,mG); NumericMatrix Zy = NumericMatrix(nG,mG); NumericMatrix Zxy; 
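  // Note on the layout here: p is the number of monomials of a bivariate
  // polynomial of the requested degree (1, 3, 6 or 10 for degree 0..3,
  // i.e. (degree+1)*(degree+2)/2). Ze holds the local estimate itself,
  // Zx/Zy the first order partial derivative estimates; the mixed and
  // higher order matrices are only allocated for degree >= 2 resp. >= 3.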
NumericMatrix Zx2; NumericMatrix Zy2; if(degree>=2){ Zxy = NumericMatrix(nG,mG); Zx2 = NumericMatrix(nG,mG); Zy2 = NumericMatrix(nG,mG); } NumericMatrix Zx2y; NumericMatrix Zxy2; NumericMatrix Zx3; NumericMatrix Zy3; if(degree>=3){ Zx2y = NumericMatrix(nG,mG); Zxy2 = NumericMatrix(nG,mG); Zx3 = NumericMatrix(nG,mG); Zy3 = NumericMatrix(nG,mG); } //Rcout << "size is " << nD << std::endl; PDEst pde; pde.est=VectorXd(p); // do better in pd*? pde.se=VectorXd(p); NN nn=nN(xD,yD); for(int i=0; i(solver),degree,nweight,akimaweight); else pde=pD(xD,yD,zD,nn, x[i],y[j],kernel,h,as(solver),degree); // extract partial derivatives from betahat //Rcout << "betahat " << std::endl; //Rcout << betahat << std::endl; //Rcout << "#### END #### " << i << std::endl; Ze(i,j) = pde.est(0); // local estimate, not really needed, but // don't throw it away, use it for checking // get partial derivatives by using betahat for Taylor series: if(degree>=1){ Zx(i,j) = pde.est[1]; Zy(i,j) = pde.est[2]; } if(degree>=2){ Zxy(i,j) = pde.est[3]; Zx2(i,j) = pde.est[4]; Zy2(i,j) = pde.est[5]; } if(degree>=3){ Zx2y(i,j) = pde.est[6]; Zxy2(i,j) = pde.est[7]; Zx3(i,j) = pde.est[8]; Zy3(i,j) = pde.est[9]; } } } if(degree==0){ ret=List::create(_("z")=Ze); } if(degree==1){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy); } if(degree==2){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy, _("zxy")=Zxy, _("zxx")=Zx2, _("zyy")=Zy2); } if(degree==3){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy, _("zxy")=Zxy, _("zxx")=Zx2, _("zyy")=Zy2, _("zxxy")=Zx2y, _("zxyy")=Zxy2, _("zxxx")=Zx3, _("zyyy")=Zy3); } return ret; } // [[Rcpp::export(name="locpoly.partderiv.points")]] List partDerivPoints(NumericVector x, NumericVector y, NumericVector xD, NumericVector yD, NumericVector zD, CharacterVector kernel="gaussian", NumericVector h=NumericVector::create(0.25,0.25), CharacterVector solver="QR", int degree=3, bool smoothpde=false, bool akimaweight=false, int nweight=25) { // Estimate up to third order partial derivatives at data grid locations: // apply local polynomial regression of order up to 3 List ret; int nD = xD.size(); int nP = x.size(); int p=0; if(degree==0) p=1; // local constant trend if(degree==1) p=3; // local linear trend else if(degree==2) p=6; // local quadratic trend else if(degree==3) p=10; // local cubic trend else if(degree>3) Rf_error("degree>3 !"); // initialize return vectors NumericVector Ze = NumericVector(nP); NumericVector Zx = NumericVector(nP); NumericVector Zy = NumericVector(nP); NumericVector Zxy; NumericVector Zx2; NumericVector Zy2; if(degree>=2){ Zxy = NumericVector(nP); Zx2 = NumericVector(nP); Zy2 = NumericVector(nP); } NumericVector Zx2y; NumericVector Zxy2; NumericVector Zx3; NumericVector Zy3; if(degree>=3){ Zx2y = NumericVector(nP); Zxy2 = NumericVector(nP); Zx3 = NumericVector(nP); Zy3 = NumericVector(nP); } PDEst pde; pde.est=VectorXd(p); // do better in pd*? 
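  // Note: in this pointwise variant the local fits in the loop below are
  // evaluated at the data locations xD[i], yD[i]; x and y seem to be used
  // only to determine the number of output points nP above.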
pde.se=VectorXd(p); NN nn=nN(xD,yD); for(int i=0; i(solver),degree,nweight,akimaweight); else pde=pD(xD,yD,zD,nn, xD[i],yD[i],kernel,h,as(solver),degree); Ze[i] = pde.est[0]; // local estimate, not really needed, but // don't throw it away, use it for checking // get partial derivatives by using betahat for Taylor series: if(degree>=1){ Zx[i] = pde.est[1]; Zy[i] = pde.est[2]; } if(degree>=2){ Zxy[i] = pde.est[3]; Zx2[i] = pde.est[4]; Zy2[i] = pde.est[5]; } if(degree>=3){ Zx2y[i] = pde.est[6]; Zxy2[i] = pde.est[7]; Zx3[i] = pde.est[8]; Zy3[i] = pde.est[9]; } } if(degree==0){ ret=List::create(_("z")=Ze); } if(degree==1){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy); } if(degree==2){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy, _("zxy")=Zxy, _("zxx")=Zx2, _("zyy")=Zy2); } if(degree==3){ ret=List::create(_("z")=Ze, _("zx")=Zx, _("zy")=Zy, _("zxy")=Zxy, _("zxx")=Zx2, _("zyy")=Zy2, _("zxxy")=Zx2y, _("zxyy")=Zxy2, _("zxxx")=Zx3, _("zyyy")=Zy3); } return ret; } PDEst pD(NumericVector xD, NumericVector yD, NumericVector zD, NN nn, double x, double y, CharacterVector kernel, NumericVector h, std::string solver, int degree){ int nD=xD.size(); double xRange=max(xD)-min(xD); double yRange=max(yD)-min(yD); if((h.size()!=2) & (h.size()!=1)) Rf_error("bandwidth parameter h is not a vector of 2 or 1 elements!"); double bwX, bwY; // global bandwidth: if(h.size()==2){ bwX=h[0]*xRange; bwY=h[1]*yRange; //Rcout << "global bw: (" << bwX << ", " << bwY << ")" << std::endl; } // initialize nearest neigbour structure for local bandwidth: NN lnn; if(h.size()==1){ NumericVector xtmp(1); xtmp[0]=x; NumericVector ytmp(1); ytmp[0]=y; // FIXME: for partDerivData only one call to nN is necessary, // outside this for loop!!! this generates the runtime difference to // global bandwidth!! lnn=extendNN(nn, xD,yD,xtmp,ytmp); //Rcout << "distance matrix" << std::endl; //Rcout << nn.ind << std::endl; //Rcout << nn.dist << std::endl; } //Rcout << "data point " << i << std::endl; // setup design matrix, // 3, 6 or 10 columns for 1st, 2nd or 3rd degree bivariate polynomial: // X=(1, (x-x0), (y-y0), // (x-x0)(y-y0), (x-x0)^2, (y-y0)^2, // (x-x0)^2(y-y0), (x-x0)(y-y0)^2, (x-x0)^3, (y-y0)^3) int p; if(degree==0) p=1; // local constant trend else if(degree==1) p=3; // local linear trend else if(degree==2) p=6; // local quadratic trend else if(degree==3) p=10; // local cubic trend MatrixXd X(nD,p); for(int j=0; j=1){ X(j,1)=x-xD[j]; X(j,2)=y-yD[j]; if(degree>=2){ X(j,3)=(x-xD[j])*(y-yD[j]); X(j,4)=(x-xD[j])*(x-xD[j]); X(j,5)=(y-yD[j])*(y-yD[j]); } if(degree>=3){ X(j,6)=(x-xD[j])*(x-xD[j])*(y-yD[j]); X(j,7)=(x-xD[j])*(y-yD[j])*(y-yD[j]); X(j,8)=(x-xD[j])*(x-xD[j])*(x-xD[j]); X(j,9)=(y-yD[j])*(y-yD[j])*(y-yD[j]); } } } // build diagonal weight matrix, better use Diagonal matrix type Eigen::DiagonalMatrix W(nD); // local bandwidth: if(h.size()==1){ if(h[0]>1) Rf_error("local bandwidth parameter >1 !"); int nnX=h[0]*nD+1; // +1: the actual point has a duplicate! // the 2nd order local polynomial needs at least 6 data locations: if(nnX<=p){ // Rf_warning("local bandwidth parameter to small, increasing"); nnX=min(IntegerVector(nD,p)); } if(nnX==nD) nnX=nD-1; bwX=lnn.dist(0,nnX); // FIXME: use lnn.dist() ?????????? 
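    // local bandwidth: take the distance from (x,y) to its nnX-th nearest
    // data point, where nnX is roughly h[0]*nD but at least p so that the
    // weighted design matrix below stays solvable; the same radius is then
    // used for both coordinate directions (bwY=bwX below).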
bwY=bwX; //Rcout << "local bw: (" << bwX << ", " << bwY << ")" << std::endl; } for(int j=0; j(kernel))); } //Rcout << "W^0.5 is " << std::endl; //Rcout << W << std::endl; // replace Xm' with X'*W^0.5 to get weighted least squares: const MatrixXd Xm(W*X); // Rcout << "design matrix is" << std::endl; // Rcout << Xh << std::endl; // Rcout << "Wm is " << std::endl; // Rcout << Wm << std::endl; // solve normal equations, use: // https://cran.r-project.org/web/packages/RcppEigen/ // vignettes/RcppEigen-Introduction.pdf section 4: // replace y with W^0.5*y to get weighted least squares: const VectorXd yd(W*as(zD)); // Rcout << "ys is " << std::endl; // Rcout << yd << std::endl; const int n(Xm.rows());//, p(Xm.cols()); PDEst pde; pde.betahat=VectorXd(p); for(int i=0;i svdXm(Xm); pde.cond = svdXm.singularValues()(0) / svdXm.singularValues()(svdXm.singularValues().size()-1); if(solver=="LLt"){ // this is the LLt Cholesky solver, section 4.1 of // https://cran.r-project.org/web/packages/RcppEigen/vignettes/RcppEigen-Introduction.pdf: const LLT llt(AtA(Xm)); pde.betahat=llt.solve(Xm.adjoint() * yd); const VectorXd fitted(Xm * pde.betahat); const VectorXd resid(yd - fitted); const int df(n - p); const double s(resid.norm() / std::sqrt(double(df))); // FIXME //pde.se=W.inverse()*(s * llt.matrixL().solve(MatrixXd::Identity(p, p)) // .colwise().norm()); } else // if(solver=="LDLt"){ // } else if(solver=="QR"){ // this is the unpivoted QR decomposition solver, section 4.2 const HouseholderQR QR(Xm); pde.betahat=QR.solve(yd); //const VectorXd fitted(Xm * pde.betahat); //const int df(n - p); // FIXME: memory errors detected by ASAN and valgrind: //pde.se=W.inverse()*(QR.matrixQR().topRows(p).triangularView() // .solve(MatrixXd::Identity(p,p)).rowwise().norm()); } else if(solver=="SVD"){ // this is the SVD based solver, section 4.4: const Eigen::JacobiSVD UDV(Xm.jacobiSvd(Eigen::ComputeThinU|Eigen::ComputeThinV)); const ArrayXd Dp(Dplus(UDV.singularValues())); const int r((Dp > 0).count()); const MatrixXd VDp(UDV.matrixV() * Dp.matrix().asDiagonal()); pde.betahat=VDp * UDV.matrixU().adjoint() * yd; const VectorXd fitted(Xm * pde.betahat); const VectorXd resid(yd - fitted); const int df(nD - p); const double s(resid.norm() / std::sqrt(double(df))); // FIXME // pde.se=W.inverse()*(s * VDp.rowwise().norm()); } else if(solver=="Eigen"){ // this is the eigen decomposition based solver, section 4.5: const Eigen::SelfAdjointEigenSolver VLV(AtA(Xm)); const ArrayXd Dp(Dplus(VLV.eigenvalues()).sqrt()); const int r((Dp > 0).count()); const MatrixXd VDp(VLV.eigenvectors() * Dp.matrix().asDiagonal()); pde.betahat=VDp * VDp.adjoint() * Xm.adjoint() * yd; const VectorXd fitted(Xm * pde.betahat); const VectorXd resid(yd - fitted); const int df(nD - p); const double s(resid.norm() / std::sqrt(double(df))); // FIXME //pde.se=W.inverse()*(s * VDp.rowwise().norm()); } else if(solver=="CPivQR"){ // this is the column based pivoted QR solver, section 4.6: const CPivQR PQR(Xm); const Permutation Pmat(PQR.colsPermutation()); const int r(PQR.rank()); VectorXd fitted, se; if (r == Xm.cols()) { // full rank case // Rcout << "pQR full rank" << std::endl; pde.betahat = PQR.solve(yd); //fitted = Xm * pde.betahat; // FIXME //pde.se = W.inverse() * (Pmat * PQR.matrixQR().topRows(p).triangularView() // .solve(MatrixXd::Identity(p, p)).rowwise().norm()); } else { // Rcout << "pQR no full rank " << r << " < " << Xm.cols() << std::endl; MatrixXd Rinv(PQR.matrixQR().topLeftCorner(r, r) .triangularView().solve(MatrixXd::Identity(r, r))); VectorXd 
effects(PQR.householderQ().adjoint() * yd); pde.betahat.fill(::NA_REAL); pde.betahat.head(r) = Rinv * effects.head(r); pde.betahat = Pmat * pde.betahat; // FIXME //se.fill(::NA_REAL); //se.head(r) = Rinv.rowwise().norm(); //se = W.inverse() * (Pmat * se); // create fitted values from effects //effects.tail(Xm.rows() - r).setZero(); //fitted = PQR.householderQ() * effects; } } else Rf_error("unknown solver"); /* results, we use only pde.betahat and se for now: return List::create(Named("coefficients") = pde.betahat, Named("fitted.values") = fitted, Named("residuals") = resid, Named("s") = s, Named("df.residual") = df, Named("rank") = p, Named("Std. Error") = se); */ /* Rcout << "pde.est:" << std::endl; Rcout << pde.est << std::endl; Rcout << "pde.se:" << std::endl; Rcout << pde.se << std::endl; */ // prepare return vector pde.est[0] = pde.betahat[0]; // local estimate, not really needed, but // don't throw it away, use it for checking // get partial derivatives by treating pde.betahat as Taylor series coefficients: if(degree>=1){ pde.est[1] = -pde.betahat[1]; // -1=-1 * 1! because we use 1/n!(x_0-x)^n in taylor pde.est[2] = -pde.betahat[2]; // series so we have: 1/n!(-1)^n(x-x_0)^n } if(degree>=2){ pde.est[3] = pde.betahat(3); // factor 1= 1!*1! pde.est[4] = 2L*pde.betahat(4); // factor 2= 2!*0! pde.est[5] = 2L*pde.betahat(5); // factor 2= 0!*2! } if(degree>=3){ // not used for akima splines, but keep it anyway: pde.est[6] = -2L*pde.betahat(6); // factor 2= 2!*1!, "-" see above pde.est[7] = -2L*pde.betahat(7); // factor 2= 1!*2! pde.est[8] = -6L*pde.betahat(8); // factor 6= 3!*0! pde.est[9] = -6L*pde.betahat(9); // factor 6= 0!*3! } return pde; } PDEst pDsmooth(NumericVector xD, NumericVector yD, NumericVector zD, NN nn, double x, double y, CharacterVector kernel, NumericVector h, std::string solver, int degree, int n, bool akimaweight){ // estimate derivatives for up to n (or better p) nearest neighbours, // return average according to Akimas weigthing scheme int p; if(degree==0) p=1; // local constant trend, //FIXME: use Akimas plane with only two points, e.g. by //else if(degree==0.5) // p=2; // else if(degree==1) p=3; // local linear trend else if(degree==2) p=6; // local quadratic trend else if(degree==3) p=10; // local cubic trend if(n==0){ n=p; } else { if(n>xD.size()) n=xD.size(); } VectorXd Z(n), L(n); VectorXd Zx(n), Lx(n); VectorXd Zy(n), Ly(n); VectorXd Zxy(n); VectorXd Zxx(n); VectorXd Zyy(n); VectorXd Zxxy(n); VectorXd Zxyy(n); VectorXd Zxxx(n); VectorXd Zyyy(n); /* NN lnn; if(h.size()==1){ NumericVector xtmp(1); xtmp[0]=x; NumericVector ytmp(1); ytmp[0]=y; // FIXME: for partDerivData only one call to nN is necessary, // outside this for loop!!! this generates the runtime difference to // global bandwidth!! 
lnn=extendNN(nn, xD,yD,xtmp,ytmp); Rcout << "distance matrix" << std::endl; Rcout << lnn.ind << std::endl; Rcout << lnn.dist << std::endl; } */ // TODO PDEst pde=pD(xD,yD,zD,nn,x,y,kernel,h,solver,degree); PDEst lsfit=pD(xD,yD,zD,nn,x,y,kernel,h,solver,1); // Rcout << " pde: " << pde.est << std::endl; for(int i=0;i0){ Zx[i]=pde.est[1]; Zy[i]=pde.est[2]; Lx[i]=lsfit.est[1]; Ly[i]=lsfit.est[2]; if(degree>1){ Zxy[i]=pde.est[3]; Zxx[i]=pde.est[4]; Zyy[i]=pde.est[5]; if(degree>2){ Zxxy[i]=pde.est[6]; Zxyy[i]=pde.est[7]; Zxxx[i]=pde.est[8]; Zyyy[i]=pde.est[9]; } } } } else { double lx,ly; lx=xD[nn.ind(0,i)]; ly=yD[nn.ind(0,i)]; // prepare vectors for later calculation of derivatives via scalar product // reproduce in Maxima with /* beta:[b0,b1,b2,b3,b4,b5,b6,b7,b8,b9]$ fvec(x,y):=[1,(x-x0),(y-y0),(x-x0)*(y-y0),(x-x0)^2,(y-y0)^2,(x-x0)^2*(y-y0),(x-x0)*(y-y0)^2,(x-x0)^3,(y-y0)^3]$ pol(x,y):=beta.fvec(x,y)$ diff(pol(x,y),x,1); diff(pol(x,y),y,1); diff(diff(pol(x,y),x,1),y,1); diff(pol(x,y),x,2); diff(pol(x,y),y,2); diff(diff(pol(x,y),x,2),y,1); diff(diff(pol(x,y),x,1),y,2); diff(pol(x,y),x,3); diff(pol(x,y),y,3); */ VectorXd fvec(p); VectorXd fxvec(p); VectorXd fyvec(p); VectorXd fxyvec(p); VectorXd fxxvec(p); VectorXd fyyvec(p); VectorXd fxxyvec(p); VectorXd fxyyvec(p); VectorXd fxxxvec(p); VectorXd fyyyvec(p); fvec[0]=1.0; fxvec[0]=0.0; fyvec[0]=0.0; fxxvec[0]=0.0; fxyvec[0]=0.0; fyyvec[0]=0.0; fxxyvec[0]=0.0; fxyyvec[0]=0.0; fxxxvec[0]=0.0; fyyyvec[0]=0.0; if(degree>0){ fvec[1]=(lx-x); fxvec[1]=1.0; fyvec[1]=0.0; fxyvec[1]=0.0; fxxvec[1]=0.0; fyyvec[1]=0.0; fxxyvec[1]=0.0; fxyyvec[1]=0.0; fxxxvec[1]=0.0; fyyyvec[1]=0.0; fvec[2]=(ly-y); fxvec[2]=0.0; fyvec[2]=1.0; fxyvec[2]=0.0; fxxvec[2]=0.0; fyyvec[2]=0.0; fxxyvec[2]=0.0; fxyyvec[2]=0.0; fxxxvec[2]=0.0; fyyyvec[2]=0.0; if(degree>1){ fvec[3]=(lx-x)*(ly-y); fxvec[3]=(ly-y); fyvec[3]=(lx-x); fxyvec[3]=1.0; fxxvec[3]=0.0; fyyvec[3]=0.0; fxxyvec[3]=0.0; fxyyvec[3]=0.0; fxxxvec[3]=0.0; fyyyvec[3]=0.0; fvec[4]=(lx-x)*(lx-x); fxvec[4]=2.0*(lx-x); fyvec[4]=0.0; fxyvec[4]=0.0; fxxvec[4]=2.0; fyyvec[4]=0.0; fxxyvec[4]=0.0; fxyyvec[4]=0.0; fxxxvec[4]=0.0; fyyyvec[4]=0.0; fvec[5]=(ly-y)*(ly-y); fxvec[5]=0.0; fyvec[5]=2.0*(ly-y); fxyvec[5]=0.0; fxxvec[5]=0.0; fyyvec[5]=2.0; fxxyvec[5]=0.0; fxyyvec[5]=0.0; fxxxvec[5]=0.0; fyyyvec[5]=0.0; if(degree>2){ fvec[6] = (lx-x)*(lx-x)*(ly-y); fxvec[6] = 2.0*(lx-x)*(ly-y); fyvec[6] = (lx-x)*(lx-x); fxyvec[6] = 2.0*(lx-x); fxxvec[6] = 2.0*(ly-y); fyyvec[6] = 0.0; fxxyvec[6]=2.0; fxyyvec[6]=0.0; fxxxvec[6]=0.0; fyyyvec[6]=0.0; fvec[7] = (lx-x)*(ly-y)*(ly-y); fxvec[7] = (ly-y)*(ly-y); fyvec[7] = 2.0*(lx-x)*(ly-y); fxyvec[7] = 2.0*(ly-y); fxxvec[7] = 0.0; fyyvec[7] = 2.0*(lx-x); fxxyvec[7]=0.0; fxyyvec[7]=2.0; fxxxvec[7]=0.0; fyyyvec[7]=0.0; fvec[8] = (lx-x)*(lx-x)*(lx-x); fxvec[8] = 3.0*(lx-x)*(lx-x); fyvec[8] = 0.0; fxyvec[8] = 0.0; fxxvec[8] = 6.0*(lx-x); fyyvec[8] = 0.0; fxxyvec[8]=0.0; fxyyvec[8]=0.0; fxxxvec[8]=6.0; fyyyvec[8]=0.0; fvec[9] = (ly-y)*(ly-y)*(ly-y); fxvec[9] = 0.0; fyvec[9] = 3.0*(ly-y)*(ly-y); fxyvec[9] = 0.0; fxxvec[9] = 0.0; fyyvec[9] = 6.0*(ly-y); fxxyvec[9]=0.0; fxyyvec[9]=0.0; fxxxvec[9]=0.0; fyyyvec[9]=6.0; } } } // FIXME, check coefficients to find bug Z[i]=1.0/20.0*pde.betahat.transpose()*fvec; if(degree>=1){ Zx[i]=-1.0/10.0*pde.betahat.transpose()*fxvec; Zy[i]=-1.0/10.0*pde.betahat.transpose()*fyvec; if(degree>=2){ Zxy[i]=1.0/3.0*pde.betahat.transpose()*fxyvec; Zxx[i]=1.0/3.0*pde.betahat.transpose()*fxxvec; Zyy[i]=1.0/3.0*pde.betahat.transpose()*fyyvec; if(degree>=3){ 
Zxxy[i]=-pde.betahat.transpose()*fxxyvec; Zxyy[i]=-pde.betahat.transpose()*fxyyvec; Zxxx[i]=-pde.betahat.transpose()*fxxxvec; Zyyy[i]=-pde.betahat.transpose()*fyyyvec; } } } } } // Prepare Akimas weighting scheme VectorXd pdmean(n); VectorXd pdsd(n); VectorXd weight(n),vweight(n),pweight(n); // first part: 5- (2-) dimensional normal density values as weights, // estimate componentwise parameters, use product density if(akimaweight){ pdmean[0]=Z.sum()/n; pdsd[0]=1.0/(n-1)*((Z.array()-pdmean[0]).array()*(Z.array()-pdmean[0]).array()).sum(); if(degree>=1){ pdmean[1]=Zx.sum()/n; pdsd[1]=1.0/(n-1)*((Z.array()-pdmean[1]).array()*(Z.array()-pdmean[1]).array()).sum(); pdmean[2]=Zy.sum()/n; pdsd[2]=1.0/(n-1)*((Z.array()-pdmean[2]).array()*(Z.array()-pdmean[2]).array()).sum(); if(degree>=2){ pdmean[3]=Zxy.sum()/n; pdsd[3]=1.0/(n-1)*((Z.array()-pdmean[3]).array()*(Z.array()-pdmean[3]).array()).sum(); pdmean[4]=Zxx.sum()/n; pdsd[4]=1.0/(n-1)*((Z.array()-pdmean[4]).array()*(Z.array()-pdmean[4]).array()).sum(); pdmean[5]=Zxy.sum()/n; pdsd[5]=1.0/(n-1)*((Z.array()-pdmean[5]).array()*(Z.array()-pdmean[5]).array()).sum(); if(degree>=3){ pdmean[6]=Zxxy.sum()/n; pdsd[6]=1.0/(n-1)*((Z.array()-pdmean[6]).array()*(Z.array()-pdmean[6]).array()).sum(); pdmean[7]=Zxyy.sum()/n; pdsd[7]=1.0/(n-1)*((Z.array()-pdmean[7]).array()*(Z.array()-pdmean[7]).array()).sum(); pdmean[8]=Zxxx.sum()/n; pdsd[8]=1.0/(n-1)*((Z.array()-pdmean[8]).array()*(Z.array()-pdmean[8]).array()).sum(); pdmean[9]=Zyyy.sum()/n; pdsd[9]=1.0/(n-1)*((Z.array()-pdmean[9]).array()*(Z.array()-pdmean[9]).array()).sum(); } } } //Rcout << "pdmean: " << pdmean << std::endl; //Rcout << "pdsd: " << pdsd << std::endl; // this doesnt work, why? // weight=dnorm(wrap(Zx),pdmean[1],pdsd[1]); pweight=(myDnorm(Zx,pdmean[1],pdsd[1])).array()* (myDnorm(Zy,pdmean[2],pdsd[2])).array(); vweight=(Zx.array()-Lx.array())*(Zx.array()-Lx.array())+ (Zy.array()-Ly.array())*(Zy.array()-Ly.array()); if(degree>=2){ pweight=pweight.array()*(myDnorm(Zxy,pdmean[3],pdsd[3])).array()* (myDnorm(Zxx,pdmean[4],pdsd[4])).array()* (myDnorm(Zyy,pdmean[5],pdsd[5])).array(); vweight=vweight.array()+Zxy.array()*Zxy.array()+ Zxx.array()*Zxx.array()+ Zyy.array()*Zyy.array(); } double wsp=pweight.sum(); double wsv=vweight.sum(); pweight = pweight.array()/wsp; // vweight = vweight.array()/wsv; // TODO: if vweight != 0 bool gtZ=true; //Rcout << "use volatility weights: " << gtZ << std::endl; for(int i=0; i=1){ pde.est[1]=weight.transpose()*Zx; pde.est[2]=weight.transpose()*Zy; if(degree>=2){ pde.est[3]=weight.transpose()*Zxy; pde.est[4]=weight.transpose()*Zxx; pde.est[5]=weight.transpose()*Zxy; if(degree>=3){ pde.est[6]=weight.transpose()*Zxxy; pde.est[7]=weight.transpose()*Zxyy; pde.est[8]=weight.transpose()*Zxxx; pde.est[9]=weight.transpose()*Zyyy; } } } // note: pde.betahat is here meaningless !! 
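  // The entries of pde.est below are weighted averages of the n neighbourwise
  // estimates above: pweight is an Akima-style product of normal densities of
  // the individual derivative estimates (normalised by its sum), vweight the
  // squared deviation from the simple local linear fit plus curvature terms;
  // the volatility weights pweight/vweight are only used when all vweight
  // entries are positive, otherwise pweight alone is used.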
return pde; } // [[Rcpp::export(name="nearest.neighbours")]] List nearestNeighbours(NumericVector x, NumericVector y){ NN ans=nN(x,y); List ret=List::create(_("index")=(ans.ind.array()+1).matrix(), _("dist")=ans.dist); return ret; } NN nN(NumericVector x, NumericVector y){ NN ret; //Rcout << "x: " << x << std::endl; //Rcout << "y: " << y << std::endl; int n=x.size(); if(y.size()!=n) Rf_error("sizes of x and y dont match!"); ret.ind=MatrixXi(n,n).setZero(); ret.dist=MatrixXd(n,n).setZero(); // FIXME: exclude case i==j !!!, return matrix should be n x (n-1) for(int i=0; idij){ // shift right to make room for insert for(int l=j;l>k;l--){ ret.dist(i,l)=ret.dist(i,l-1); ret.ind(i,l)=ret.ind(i,l-1); } // insert //Rcout << "point " << i << ", insert " << j << " at " << k < >(X), Rcpp::as >(x); VectorXd ytmp = VectorXd(n+N); ytmp << Rcpp::as >(Y), Rcpp::as >(y); ret.ind.block(0,0,N,N)=nn.ind; ret.dist.block(0,0,N,N)=nn.dist; for(int i=0; i=N)) | (i>+N)){ double dij=sqrt((xtmp[i]-xtmp[j])*(xtmp[i]-xtmp[j])+(ytmp[i]-ytmp[j])*(ytmp[i]-ytmp[j])); //Rcout << "dist: " << dij << std::endl; // simply record first neighbour // sort in other neighbours for(int k=0; kdij){ // shift right for(int l=j;l>k;l--){ ret.dist(i,l)=ret.dist(i,l-1); ret.ind(i,l)=ret.ind(i,l-1); } // insert //Rcout << "point " << i << ", insert " << j << " at " << k < pts; std::vector triads; std::vector outx; int nx=x.size(); int ny=y.size(); List ret; if(nx!=ny) ::Rf_error("length of x and y dont match!"); try { // do s-Hull triangulation: // call shDt Triang tXYZ=shDt(Rcpp::as >(x), Rcpp::as >(y)); int nT=tXYZ.nT; tXYZ.xc=std::vector(nT); tXYZ.yc=std::vector(nT); tXYZ.rc=std::vector(nT); tXYZ.ar=std::vector(nT); tXYZ.rt=std::vector(nT); for(int i=0; i cp1=std::vector(nx); std::vector cp2=std::vector(nx); // count int nCH=0; // check if neigbour triangle is not present (-1), means that // arc is part of the convex hull, for(int i=0; i 1 trlist(i,4)=tXYZ.j2[i]+1; trlist(i,5)=tXYZ.j3[i]+1; trlist(i,6)=tXYZ.k1[i]+1; trlist(i,7)=tXYZ.k2[i]+1; trlist(i,8)=tXYZ.k3[i]+1; } NumericMatrix cclist=NumericMatrix(nT,5); for(int i=0; i x, std::vector y){ // Note: circumcircles and convex hull only done in shullDeltri // as this is not needed for the application within Akimas // spline routines. Triang Txy; std::vector pts; std::vector triads; std::vector outx; int nx=x.size(); int ny=y.size(); if(nx!=ny) ::Rf_error("length of x and y dont match!"); try { // triangulation // Rcout << "start triangulation" << std::endl; for(int i=0; i(nT); Txy.i2=std::vector(nT); Txy.i3=std::vector(nT); Txy.j1=std::vector(nT); Txy.j2=std::vector(nT); Txy.j3=std::vector(nT); // not used here, dummy allocation Txy.k1=std::vector(1); Txy.k2=std::vector(1); Txy.k3=std::vector(1); Txy.ch=std::vector(1); Txy.a1=std::vector(1); Txy.a2=std::vector(1); Txy.xc=std::vector(1); Txy.yc=std::vector(1); Txy.rc=std::vector(1); Txy.ar=std::vector(1); Txy.rt=std::vector(1); for(int i=0; i=eps); //ret[i]=((x2-x1)*(y0[i]-y1)>=(x0[i]-x1)*(y2-y1)); } return ret; } // [[Rcpp::export]] LogicalVector on(double x1,double y1, double x2, double y2, NumericVector x0, NumericVector y0, double eps=1E-16){ int n=x0.size(); LogicalVector ret(n); for(int i=0; i #include #include /* copyright 2016 Dr David Sinclair david@s-hull.org program to compute Delaunay triangulation of a set of points. this code is released under GPL3, a copy ofthe license can be found at http://www.gnu.org/licenses/gpl-3.0.html you can purchase a un-restricted licnese from http://www.s-hull.org for the price of one beer! 
revised 12/feb/2016 */ struct Triad { int a,b, c; int ab, bc, ac; // adjacent edges index to neighbouring triangle. float ro, R,C; //std::set idx; Triad() {}; Triad(int x, int y) : a(x), b(y),c(0), ab(-1), bc(-1), ac(-1), ro(-1), R(0), C(0) {}; Triad(int x, int y, int z) : a(x), b(y), c(z), ab(-1), bc(-1), ac(-1), ro(-1), R(0), C(0) {}; Triad(const Triad &p) : a(p.a), b(p.b), c(p.c), ab(p.ab), bc(p.bc), ac(p.ac), ro(p.ro), R(p.R), C(p.C) {}; Triad &operator=(const Triad &p) { a = p.a; b = p.b; c = p.c; ab = p.ab; bc = p.bc; ac = p.ac; ro = p.ro; R = p.R; C = p.C; return *this; }; }; /* point structure for s_hull only. has to keep track of triangle ids as hull evolves. */ struct Shx { int id, trid; float r,c, tr,tc; float ro; Shx() {}; Shx(float a, float b) : id(-1), r(a), c(b), tr(0.0), tc(0.0), ro(0.0) {}; Shx(float a, float b, float x) : id(-1), r(a), c(b), tr(0), tc(0), ro(x) {}; Shx(const Shx &p) : id(p.id), trid(p.trid), r(p.r), c(p.c), tr(p.tr), tc(p.tc), ro(p.ro) {}; Shx &operator=(const Shx &p) { id = p.id; trid = p.trid; r = p.r; c = p.c; tr = p.tr; tc = p.tc; ro = p.ro; return *this; }; }; // sort into descending order (for use in corner responce ranking). inline bool operator<(const Shx &a, const Shx &b) { if( a.ro == b.ro){ if( a.r == b.r ){ return a.c < b.c; } return a.r < b.r; } return a.ro < b.ro; }; struct Dupex { int id; float r,c; Dupex() {}; Dupex(float a, float b) : id(-1), r(a), c(b) {}; Dupex(float a, float b, int x) : id(x), r(a), c(b) {}; Dupex(const Dupex &p) : id(p.id), r(p.r), c(p.c) {}; Dupex &operator=(const Dupex &p) { id = p.id; r = p.r; c = p.c; return *this; }; }; // sort into descending order (for use in corner responce ranking). inline bool operator<(const Dupex &a, const Dupex &b) { if( a.r == b.r) return a.c < b.c; return a.r < b.r; }; // from s_hull.C int s_hull_pro( std::vector &pts, std::vector &triads); void circle_cent2(float r1,float c1, float r2,float c2, float r3,float c3,float &r,float &c, float &ro2); void circle_cent4(float r1,float c1, float r2,float c2, float r3,float c3,float &r,float &c, float &ro2); void write_Shx(std::vector &pts, char * fname); void write_Triads(std::vector &ts, char * fname); int Cline_Renka_test(float &Ax, float &Ay, float &Bx, float &By, float &Cx, float &Cy, float &Dx, float &Dy); int T_flip_pro( std::vector &pts, std::vector &triads, std::vector &slump, int numt, int start, std::vector &ids); int T_flip_pro_idx( std::vector &pts, std::vector &triads, std::vector &slump, std::vector &ids, std::vector &ids2); int read_Shx(std::vector &pts, char * fname); int de_duplicate( std::vector &pts, std::vector &outx ); int de_duplicateX( std::vector &pts, std::vector &outx,std::vector &pts2 ); int test_center(Shx &pt0, Shx &pt1,Shx &pt2); int T_flip_edge( std::vector &pts, std::vector &triads, std::vector &slump, int numt, int start, std::vector &ids); #endif interp/src/interp.cpp0000644000176200001440000002301613605104165014352 0ustar liggesusers #include "interp.h" // [[Rcpp::export]] List interpDeltri(NumericVector x, NumericVector y, NumericVector zD, List t, // data xD and yD contained here! 
CharacterVector input = "points", CharacterVector output = "grid") { List T(t); int nT = T.size(); int nG = x.size(); int mG = y.size(); NumericMatrix z; // initialize return matrix with NA: if(as(output)=="grid"){ NumericMatrix z = NumericMatrix(nG,mG,NumericVector (nG*mG,NumericVector::get_na()).begin()); } if(as(output)=="points"){ NumericMatrix z = NumericMatrix(nG,1,NumericVector (nG,NumericVector::get_na()).begin()); } List ret; // bounding box for triangles: IntegerVector jTsw(nT); IntegerVector kTsw(nT); IntegerVector jTne(nT); IntegerVector kTne(nT); try { if(as(output)=="grid"){ // get bounding boxes (SW <-> NE) for all triangles: for(int i=0; ixne) jTne[i]=nG-j-1; } for(int k=0; kyne) kTne[i]=mG-k-1; } } } // iterate over triangles for(int i=0; i(output)=="grid"){ // iterate only over grid points (j,k) inside bounding box of triangle i for(int j=jTsw[i]; j(output)=="points"){ // iterate over output points for(int j=0; j(output)=="grid"){ z = NumericMatrix(nG,mG,NumericVector (nG*mG,NumericVector::get_na()).begin()); } if(as(output)=="points"){ z = NumericMatrix(nG,1,NumericVector (nG,NumericVector::get_na()).begin()); } List ret; try{ // part 0 // do s-Hull triangulation: // call shDt Triang tXY=shDt(Rcpp::as >(xD), Rcpp::as >(yD)); // note: triangles are enumerated counterclockwise int nT=tXY.nT; // Rcout << "get bounding boxes" < NE) for all triangles: IntegerVector jTsw(nT); IntegerVector kTsw(nT); IntegerVector jTne(nT); IntegerVector kTne(nT); IntegerVector iT = IntegerVector(3); NumericVector xT = NumericVector(3); NumericVector yT = NumericVector(3); NumericVector zT = NumericVector(3); for(int i=0; ixne) jTne[i]=nG-j-1; } for(int k=0; kyne) kTne[i]=mG-k-1; } } // part 2 // TODO for !linear // part 3 // iterate over triangles for(int i=0;i(output)=="grid"){ // iterate only over grid points (j,k) inside bounding box of triangle i for(int j=jTsw[i]; j(output)=="points"){ // iterate over output points for(int j=0; j //#include //#include #include #include #include #include #include #include #include #include "s_hull_pro.h" using namespace std; /* copyright 2016 Dr David Sinclair david@s-hull.org program to compute Delaunay triangulation of a set of points. this code is released under GPL3, a copy ofthe license can be found at http://www.gnu.org/licenses/gpl-3.0.html you can purchase a un-restricted licnese from http://www.s-hull.org for the price of one beer! revised 2/April/2016 */ void circle_cent2(float r1,float c1, float r2,float c2, float r3,float c3, float &r,float &c, float &ro2){ /* * function to return the center of a circle and its radius * degenerate case should never be passed to this routine!!!!!!!!!!!!! * but will return r0 = -1 if it is. */ float a1 = (r1+r2)/2.0; float a2 = (c1+c2)/2.0; float b1 = (r3+r2)/2.0; float b2 = (c3+c2)/2.0; float e2 = r1-r2; float e1 = -c1+c2; float q2 = r3-r2; float q1 = -c3+c2; r=0; c=0; ro2=-1; if( e1*-q2 + e2*q1 == 0 ) return; float beta = (-e2*(b1-a1) + e1*(b2-a2))/( e2*q1-e1*q2); r = b1 + q1*beta; c = b2 + q2*beta; ro2 = (r1-r)*(r1-r) + (c1-c)*(c1-c); return; } /* read an ascii file of (r,c) point pairs. the first line of the points file should contain "NUMP 2 points" if it does not have the word points in it the first line is interpretted as a point pair. 
*/ int read_Shx(std::vector &pts, char * fname){ char s0[513]; int nump =0; float p1,p2; Shx pt; std::string line; std::string points_str("points"); std::ifstream myfile; myfile.open(fname); if (myfile.is_open()){ getline (myfile,line); //int numc = line.length(); // check string for the string "points" int n = (int) line.find( points_str); if( n > 0){ while ( myfile.good() ){ getline (myfile,line); if( line.length() <= 512){ copy( line.begin(), line.end(), s0); s0[line.length()] = 0; int v = sscanf( s0, "%g %g", &p1,&p2); if( v>0 ){ pt.id = nump; nump++; pt.r = p1; pt.c = p2; pts.push_back(pt); } } } } else{ // assume all number pairs on a line are points if( line.length() <= 512){ copy( line.begin(), line.end(), s0); s0[line.length()] = 0; int v = sscanf( s0, "%g %g", &p1,&p2); if( v>0 ){ pt.id = nump; nump++; pt.r = p1; pt.c = p2; pts.push_back(pt); } } while ( myfile.good() ){ getline (myfile,line); if( line.length() <= 512){ copy( line.begin(), line.end(), s0); s0[line.length()] = 0; int v = sscanf( s0, "%g %g", &p1,&p2); if( v>0 ){ pt.id = nump; nump++; pt.r = p1; pt.c = p2; pts.push_back(pt); } } } } myfile.close(); } nump = (int) pts.size(); return(nump); }; /* write out a set of points to disk */ void write_Shx(std::vector &pts, char * fname){ std::ofstream out(fname, ios::out); int nr = (int) pts.size(); out << nr << " 2 points" << endl; for (int r = 0; r < nr; r++){ out << pts[r].r << ' ' << pts[r].c << endl; } out.close(); return; }; /* write out triangle ids to be compatible with matlab/octave array numbering. */ void write_Triads(std::vector &ts, char * fname){ std::ofstream out(fname, ios::out); int nr = (int) ts.size(); out << nr << " 6 point-ids (1,2,3) adjacent triangle-ids ( limbs ab ac bc )" << endl; for (int r = 0; r < nr; r++){ out << ts[r].a+1 << ' ' << ts[r].b+1 <<' ' << ts[r].c+1 <<' ' << ts[r].ab+1 <<' ' << ts[r].ac+1 <<' ' << ts[r].bc+1 << endl; //" " << ts[r].ro << endl; } out.close(); return; }; /* version in which the ids of the triangles associated with the sides of the hull are tracked. */ int s_hull_pro( std::vector &pts, std::vector &triads) { int nump = (int) pts.size(); if( nump < 3 ){ // cerr << "less than 3 points, aborting " << endl; return(-1); } float r = pts[0].r; float c = pts[0].c; for( int k=0; k 0 ){ mid = k; romin2 = ro2; R = r; C = c; } else if( romin2 *4 < pts[k].ro ) k=nump; k++; } if( mid < 0 ){ // cerr << "linear structure, aborting " << endl; return(-2); } Shx pt0 = pts[0]; Shx pt1 = pts[1]; Shx pt2 = pts[mid]; int ptest = test_center(pt0, pt1, pt2 ); //if( ptest < 0 ){ // cerr << "warning: obtuce seed triangle sellected " << endl; //} pts.erase(pts.begin() + mid); // necessary for round off reasons:(((((( pts.erase(pts.begin() ); pts.erase(pts.begin() ); for( int k=0; k slump; slump.resize(nump); for( int k=0; k hull; r = (pts[0].r + pts[1].r + pts[2].r )/(float) 3.0; c = (pts[0].c + pts[1].c + pts[2].c )/(float) 3.0; float dr0 = pts[0].r - r, dc0 = pts[0].c - c; float tr01 = pts[1].r - pts[0].r, tc01 = pts[1].c - pts[0].c; float df = -tr01* dc0 + tc01*dr0; if( df < 0 ){ // [ 0 1 2 ] pt0.tr = pt1.r-pt0.r; pt0.tc = pt1.c-pt0.c; pt0.trid = 0; hull.push_back( pt0 ); pt1.tr = pt2.r-pt1.r; pt1.tc = pt2.c-pt1.c; pt1.trid = 0; hull.push_back( pt1 ); pt2.tr = pt0.r-pt2.r; pt2.tc = pt0.c-pt2.c; pt2.trid = 0; hull.push_back( pt2 ); Triad tri(pt0.id,pt1.id,pt2.id); tri.ro = romin2; tri.R = R; tri.C = C; triads.push_back(tri); } else{ // [ 0 2 1 ] as anti-clockwise turning is the work of the devil.... 
pt0.tr = pt2.r-pt0.r; pt0.tc = pt2.c-pt0.c; pt0.trid = 0; hull.push_back( pt0 ); pt2.tr = pt1.r-pt2.r; pt2.tc = pt1.c-pt2.c; pt2.trid = 0; hull.push_back( pt2 ); pt1.tr = pt0.r-pt1.r; pt1.tc = pt0.c-pt1.c; pt1.trid = 0; hull.push_back( pt1 ); Triad tri(pt0.id,pt2.id,pt1.id); tri.ro = romin2; tri.R = R; tri.C = C; triads.push_back(tri); } // add new points into hull (removing obscured ones from the chain) // and creating triangles.... // that will need to be flipped. float dr, dc, rx,cx; Shx ptx; int numt; // write_Triads(triads, "rose_0.mat"); for( int k=3; k pidx, tridx; int hidx; // new hull point location within hull..... float df = -dc* hull[0].tr + dr*hull[0].tc; // visibility test vector. if( df < 0 ){ // starting with a visible hull facet !!! int e1 = 1, e2 = numh; hidx = 0; // check to see if segment numh is also visible df = -dc* hull[numh-1].tr + dr*hull[numh-1].tc; //cerr << df << ' ' ; if( df < 0 ){ // visible. pidx.push_back(hull[numh-1].id); tridx.push_back(hull[numh-1].trid); for( int h=0; h0; h--){ // if segment h is visible delete h + 1 dr = rx- hull[h].r; dc = cx- hull[h].c; df = -dc* hull[h].tr + dr*hull[h].tc; if( df < 0 ){ // h is visible pidx.insert(pidx.begin(), hull[h].id); tridx.insert(tridx.begin(), hull[h].trid); hull.erase(hull.begin() + h+1); // erase end of chain } else{ h = (int) hull.size()-1; hull[h].tr = -hull[h].r + ptx.r; // points at start of chain. hull[h].tc = -hull[h].c + ptx.c; break; } } df = 9; } else{ // cerr << df << ' ' << endl; hidx = 1; // keep pt hull[0] tridx.push_back(hull[0].trid); pidx.push_back(hull[0].id); for( int h=1; h 0 ){ // first invisible segment. e2 = h; break; } } } // triangle pidx starts at e1 and ends at e2 (inclusive). if( e2 < numh ){ for( int e=e1; e<=e2; e++){ pidx.push_back(hull[e].id); tridx.push_back(hull[e].trid); } } else{ for( int e=e1; e 0 ) hull[hidx-1].trid = numt; else{ numh = (int) hull.size(); hull[numh-1].trid = numt; } triads.push_back( trx ); numt++; } else{ trx.ab = -1; for(int p=0; p 0 ) trx.ab = numt-1; trx.ac = numt+1; // index back into the triads. Triad &txx = triads[tridx[p]]; if( ( trx.b == txx.a && trx.c == txx.b) |( trx.b == txx.b && trx.c == txx.a)) { txx.ab = numt; } else if( ( trx.b == txx.a && trx.c == txx.c) |( trx.b == txx.c && trx.c == txx.a)) { txx.ac = numt; } else if( ( trx.b == txx.b && trx.c == txx.c) |( trx.b == txx.c && trx.c == txx.b)) { txx.bc = numt; } triads.push_back( trx ); numt++; } triads[numt-1].ac=-1; hull[hidx].trid = numt-1; if( hidx > 0 ) hull[hidx-1].trid = T0; else{ numh = (int) hull.size(); hull[numh-1].trid = T0; } } /* char tname[128]; sprintf(tname,"rose_%d.mat",k); write_Triads(triads, tname); int dbgb = 0; */ } // cerr << "of triangles " << triads.size() << " to be flipped. 
"<< endl; // write_Triads(triads, "tris0.mat"); std::vector ids, ids2; int tf = T_flip_pro( pts, triads, slump, numt, 0, ids); if( tf < 0 ){ // cerr << "cannot triangualte this set " << endl; return(-3); } // write_Triads(triads, "tris1.mat"); // cerr << "n-ids " << ids.size() << endl; int nits = (int) ids.size(), nit=1; while( nits > 0 && nit < 50){ tf = T_flip_pro_idx( pts, triads, slump, ids, ids2); nits = (int) ids2.size(); ids.swap(ids2); // cerr << "flipping cycle " << nit << " active triangles " << nits << endl; nit ++; if( tf < 0 ){ // cerr << "cannot triangualte this set " << endl; return(-4); } } ids.clear(); nits = T_flip_edge( pts, triads, slump, numt, 0, ids); nit=0; while( nits > 0 && nit < 100){ tf = T_flip_pro_idx( pts, triads, slump, ids, ids2); ids.swap(ids2); nits = (int) ids.size(); //cerr << "flipping cycle " << nit << " active triangles " << nits << endl; nit ++; if( tf < 0 ){ // cerr << "cannot triangualte this set " << endl; return(-4); } } return(1); } void circle_cent4(float r1,float c1, float r2,float c2, float r3,float c3, float &r,float &c, float &ro2){ /* * function to return the center of a circle and its radius * degenerate case should never be passed to this routine!!!!!!!!!!!!! * but will return r0 = -1 if it is. */ double rd, cd; double v1 = 2*(r2-r1), v2 = 2*(c2-c1), v3 = r2*r2 - r1*r1 + c2*c2 - c1*c1; double v4 = 2*(r3-r1), v5 = 2*(c3-c1), v6 = r3*r3 - r1*r1 + c3*c3 - c1*c1, v7 = v2*v4 - v1*v5; if( v7 == 0 ){ r=0; c=0; ro2 = -1; return; } cd = (v4*v3 - v1*v6)/v7; if( v1 != 0 ) rd = (v3 - c*v2)/v1; else rd = (v6 - c*v5)/v4; ro2 = (float) ( (rd-r1)*(rd-r1) + (cd-c1)*(cd-c1) ); r = (float) rd; c = (float) cd; return; } /* test a set of points for duplicates. erase duplicate points, do not change point ids. */ int de_duplicate( std::vector &pts, std::vector &outx ){ int nump = (int) pts.size(); std::vector dpx; Dupex d; for( int k=0; k=0; k--){ pts.erase(pts.begin()+outx[k]); } return(nx); } /* flip pairs of triangles that are not valid delaunay triangles the Cline-Renka test is used rather than the less stable circum circle center computation test of s-hull. or the more expensive determinant test. */ int T_flip_pro( std::vector &pts, std::vector &triads, std::vector &slump, int numt, int start, std::vector &ids){ float r3,c3; int pa,pb,pc, pd, D, L1, L2, L3, L4, T2; Triad tx, tx2; for( int t=start; t= 0 ){ pa = slump[tri.a]; pb = slump[tri.b]; pc = slump[tri.c]; T2 = tri.bc; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.b == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.b == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.b == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ // cerr << "triangle flipping error. " << t << endl; return(-5); } if( pd < 0 || pd > 100) int dfx = 9; r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pa].r, pts[pa].c, pts[pb].r, pts[pb].c, pts[pc].r, pts[pc].c, r3, c3 ); if( XX < 0 ){ L1 = tri.ab; L2 = tri.ac; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.a; tx.b = tri.b; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.a; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. 
if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } if( flipped == 0 && tri.ab >= 0 ){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ab; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ // cerr << "triangle flipping error. " << t << endl; return(-5); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pc].r, pts[pc].c, pts[pb].r, pts[pb].c, pts[pa].r, pts[pa].c,r3, c3); if( XX < 0){ L1 = tri.ac; L2 = tri.bc; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.c; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.c; tx2.b = tri.b; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } if( flipped == 0 && tri.ac >= 0 ){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ac; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ // cerr << "triangle flipping error. " << t << endl; return(-5); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pb].r, pts[pb].c, pts[pa].r, pts[pa].c, pts[pc].r, pts[pc].c,r3, c3); if( XX < 0 ){ L1 = tri.ab; // .ac shared limb L2 = tri.bc; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.b; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.b; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; // change knock on triangle labels. if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } } return(1); } /* minimum angle cnatraint for circum circle test. 
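   The two triangles share the edge B--C; A and D are the opposite vertices.
   D lies inside the circumcircle of (A,B,C) exactly when the interior angles
   at A and at D add up to more than pi, i.e. when
   sin(A+D) = sin(A)cos(D) + cos(A)sin(D) < 0.  Two obtuse angles always
   force a flip, two acute angles never do, and only the mixed case needs
   the sine terms.  The test (and the vertex labelling sketched next) is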
due to Cline & Renka A -- B | / | C -- D */ int Cline_Renka_test(float &Ax, float &Ay, float &Bx, float &By, float &Cx, float &Cy, float &Dx, float &Dy) { float v1x = Bx-Ax, v1y = By-Ay, v2x = Cx-Ax, v2y = Cy-Ay, v3x = Bx-Dx, v3y = By-Dy, v4x = Cx-Dx, v4y = Cy-Dy; float cosA = v1x*v2x + v1y*v2y; float cosD = v3x*v4x + v3y*v4y; if( cosA < 0 && cosD < 0 ) // two obtuse angles return(-1); float ADX = Ax-Dx, ADy = Ay-Dy; if( cosA > 0 && cosD > 0 ) // two acute angles return(1); float sinA = fabs(v1x*v2y - v1y*v2x); float sinD = fabs(v3x*v4y - v3y*v4x); if( cosA*sinD + sinA*cosD < 0 ) return(-1); return(1); } // same again but with set of triangle ids to be iterated over. int T_flip_pro_idx( std::vector &pts, std::vector &triads, std::vector &slump, std::vector &ids, std::vector &ids2){ float r3,c3; int pa,pb,pc, pd, D, L1, L2, L3, L4, T2; Triad tx, tx2; ids2.clear(); //std::vector ids2; int numi = ids.size(); for( int x=0; x= 0 ){ pa = slump[tri.a]; pb = slump[tri.b]; pc = slump[tri.c]; T2 = tri.bc; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.b == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.b == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.b == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ // cerr << "triangle flipping error. " << t << " T2: " << T2<< endl; return(-6); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pa].r, pts[pa].c, pts[pb].r, pts[pb].c, pts[pc].r, pts[pc].c,r3, c3); if( XX < 0 ){ L1 = tri.ab; L2 = tri.ac; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.a; tx.b = tri.b; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.a; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids2.push_back(t); ids2.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } if( flipped == 0 && tri.ab >= 0 ){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ab; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ // cerr << "triangle flipping error. " << t << endl; return(-6); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pc].r, pts[pc].c, pts[pb].r, pts[pb].c, pts[pa].r, pts[pa].c,r3, c3); if( XX < 0 ){ L1 = tri.ac; L2 = tri.bc; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.c; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.c; tx2.b = tri.b; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids2.push_back(t); ids2.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. 
if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } if( flipped == 0 && tri.ac >= 0 ){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ac; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ //cerr << "triangle flipping error. " << t << endl; return(-6); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pb].r, pts[pb].c, pts[pc].r, pts[pc].c, pts[pa].r, pts[pa].c,r3, c3); if( XX < 0 ){ L1 = tri.ab; // .ac shared limb L2 = tri.bc; if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.b; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.b; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids2.push_back(t); ids2.push_back(T2); t2 = tx2; tri = tx; // change knock on triangle labels. if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } } } } } /* if( ids2.size() > 5){ sort(ids2.begin(), ids2.end()); int nums = ids2.size(); int last = ids2[0], n=0; ids3.push_back(last); for(int g=1; g 0 ) return(-1); v = r01*r21 + c01*c21; if( v < 0 ) return(-1); return(1); } int de_duplicateX( std::vector &pts, std::vector &outx,std::vector &pts2 ){ int nump = (int) pts.size(); std::vector dpx; Dupex d; for( int k=0; k &pts, std::vector &triads, std::vector &slump, int numt, int start, std::vector &ids){ float r3,c3; int pa,pb,pc, pd, D, L1, L2, L3, L4, T2; Triad tx, tx2; for( int t=start; t= 0 && (tri.ac < 0 || tri.ab < 0) ){ pa = slump[tri.a]; pb = slump[tri.b]; pc = slump[tri.c]; T2 = tri.bc; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.b == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.b == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.b == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ //cerr << "triangle flipping error. " << t << endl; return(-5); } if( pd < 0 || pd > 100) int dfx = 9; r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pa].r, pts[pa].c, pts[pb].r, pts[pb].c, pts[pc].r, pts[pc].c, r3, c3 ); if( XX < 0 ){ L1 = tri.ab; L2 = tri.ac; // if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.a; tx.b = tri.b; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.a; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. 
if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } // } } } if( flipped == 0 && tri.ab >= 0 && (tri.ac < 0 || tri.bc < 0)){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ab; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ //cerr << "triangle flipping error. " << t << endl; return(-5); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pc].r, pts[pc].c, pts[pb].r, pts[pb].c, pts[pa].r, pts[pa].c,r3, c3); if( XX < 0){ L1 = tri.ac; L2 = tri.bc; // if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.c; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.c; tx2.b = tri.b; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; flipped = 1; // change knock on triangle labels. if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } // } } } if( flipped == 0 && tri.ac >= 0 && (tri.bc < 0 || tri.ab < 0) ){ pc = slump[tri.c]; pb = slump[tri.b]; pa = slump[tri.a]; T2 = tri.ac; Triad &t2 = triads[T2]; // find relative orientation (shared limb). if( t2.ab == t ){ D = t2.c; pd = slump[t2.c]; if( tri.a == t2.a){ L3 = t2.ac; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ac; } } else if( t2.ac == t ){ D = t2.b; pd = slump[t2.b]; if( tri.a == t2.a){ L3 = t2.ab; L4 = t2.bc; } else{ L3 = t2.bc; L4 = t2.ab; } } else if( t2.bc == t ){ D = t2.a; pd = slump[t2.a]; if( tri.a == t2.b){ L3 = t2.ab; L4 = t2.ac; } else{ L3 = t2.ac; L4 = t2.ab; } } else{ //cerr << "triangle flipping error. " << t << endl; return(-5); } r3 = pts[pd].r; c3 = pts[pd].c; int XX = Cline_Renka_test( pts[pb].r, pts[pb].c, pts[pa].r, pts[pa].c, pts[pc].r, pts[pc].c,r3, c3); if( XX < 0 ){ L1 = tri.ab; // .ac shared limb L2 = tri.bc; // if( L1 != L3 && L2 != L4 ){ // need this check for stability. tx.a = tri.b; tx.b = tri.a; tx.c = D; tx.ab = L1; tx.ac = T2; tx.bc = L3; // triangle 2; tx2.a = tri.b; tx2.b = tri.c; tx2.c = D; tx2.ab = L2; tx2.ac = t; tx2.bc = L4; ids.push_back(t); ids.push_back(T2); t2 = tx2; tri = tx; // change knock on triangle labels. 
if( L3 >= 0 ){ Triad &t3 = triads[L3]; if( t3.ab == T2 ) t3.ab = t; else if( t3.bc == T2 ) t3.bc = t; else if( t3.ac == T2 ) t3.ac = t; } if(L2 >= 0 ){ Triad &t4 = triads[L2]; if( t4.ab == t ) t4.ab = T2; else if( t4.bc == t ) t4.bc = T2; else if( t4.ac == t ) t4.ac = T2; } //} } } } return(1); } interp/src/RcppExports.cpp0000644000176200001440000002405513605151405015345 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; // interpDeltri List interpDeltri(NumericVector x, NumericVector y, NumericVector zD, List t, CharacterVector input, CharacterVector output); RcppExport SEXP interp_interpDeltri(SEXP xSEXP, SEXP ySEXP, SEXP zDSEXP, SEXP tSEXP, SEXP inputSEXP, SEXP outputSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< NumericVector >::type zD(zDSEXP); Rcpp::traits::input_parameter< List >::type t(tSEXP); Rcpp::traits::input_parameter< CharacterVector >::type input(inputSEXP); Rcpp::traits::input_parameter< CharacterVector >::type output(outputSEXP); rcpp_result_gen = Rcpp::wrap(interpDeltri(x, y, zD, t, input, output)); return rcpp_result_gen; END_RCPP } // interpShull List interpShull(NumericVector x, NumericVector y, NumericVector xD, NumericVector yD, NumericVector zD, bool linear, CharacterVector input, CharacterVector output); RcppExport SEXP interp_interpShull(SEXP xSEXP, SEXP ySEXP, SEXP xDSEXP, SEXP yDSEXP, SEXP zDSEXP, SEXP linearSEXP, SEXP inputSEXP, SEXP outputSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< NumericVector >::type xD(xDSEXP); Rcpp::traits::input_parameter< NumericVector >::type yD(yDSEXP); Rcpp::traits::input_parameter< NumericVector >::type zD(zDSEXP); Rcpp::traits::input_parameter< bool >::type linear(linearSEXP); Rcpp::traits::input_parameter< CharacterVector >::type input(inputSEXP); Rcpp::traits::input_parameter< CharacterVector >::type output(outputSEXP); rcpp_result_gen = Rcpp::wrap(interpShull(x, y, xD, yD, zD, linear, input, output)); return rcpp_result_gen; END_RCPP } // partDerivGrid List partDerivGrid(NumericVector x, NumericVector y, NumericVector xD, NumericVector yD, NumericVector zD, CharacterVector kernel, NumericVector h, CharacterVector solver, int degree, bool smoothpde, bool akimaweight, int nweight); RcppExport SEXP interp_partDerivGrid(SEXP xSEXP, SEXP ySEXP, SEXP xDSEXP, SEXP yDSEXP, SEXP zDSEXP, SEXP kernelSEXP, SEXP hSEXP, SEXP solverSEXP, SEXP degreeSEXP, SEXP smoothpdeSEXP, SEXP akimaweightSEXP, SEXP nweightSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< NumericVector >::type xD(xDSEXP); Rcpp::traits::input_parameter< NumericVector >::type yD(yDSEXP); Rcpp::traits::input_parameter< NumericVector >::type zD(zDSEXP); Rcpp::traits::input_parameter< CharacterVector >::type kernel(kernelSEXP); Rcpp::traits::input_parameter< NumericVector >::type h(hSEXP); Rcpp::traits::input_parameter< CharacterVector >::type solver(solverSEXP); 
Rcpp::traits::input_parameter< int >::type degree(degreeSEXP); Rcpp::traits::input_parameter< bool >::type smoothpde(smoothpdeSEXP); Rcpp::traits::input_parameter< bool >::type akimaweight(akimaweightSEXP); Rcpp::traits::input_parameter< int >::type nweight(nweightSEXP); rcpp_result_gen = Rcpp::wrap(partDerivGrid(x, y, xD, yD, zD, kernel, h, solver, degree, smoothpde, akimaweight, nweight)); return rcpp_result_gen; END_RCPP } // partDerivPoints List partDerivPoints(NumericVector x, NumericVector y, NumericVector xD, NumericVector yD, NumericVector zD, CharacterVector kernel, NumericVector h, CharacterVector solver, int degree, bool smoothpde, bool akimaweight, int nweight); RcppExport SEXP interp_partDerivPoints(SEXP xSEXP, SEXP ySEXP, SEXP xDSEXP, SEXP yDSEXP, SEXP zDSEXP, SEXP kernelSEXP, SEXP hSEXP, SEXP solverSEXP, SEXP degreeSEXP, SEXP smoothpdeSEXP, SEXP akimaweightSEXP, SEXP nweightSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< NumericVector >::type xD(xDSEXP); Rcpp::traits::input_parameter< NumericVector >::type yD(yDSEXP); Rcpp::traits::input_parameter< NumericVector >::type zD(zDSEXP); Rcpp::traits::input_parameter< CharacterVector >::type kernel(kernelSEXP); Rcpp::traits::input_parameter< NumericVector >::type h(hSEXP); Rcpp::traits::input_parameter< CharacterVector >::type solver(solverSEXP); Rcpp::traits::input_parameter< int >::type degree(degreeSEXP); Rcpp::traits::input_parameter< bool >::type smoothpde(smoothpdeSEXP); Rcpp::traits::input_parameter< bool >::type akimaweight(akimaweightSEXP); Rcpp::traits::input_parameter< int >::type nweight(nweightSEXP); rcpp_result_gen = Rcpp::wrap(partDerivPoints(x, y, xD, yD, zD, kernel, h, solver, degree, smoothpde, akimaweight, nweight)); return rcpp_result_gen; END_RCPP } // nearestNeighbours List nearestNeighbours(NumericVector x, NumericVector y); RcppExport SEXP interp_nearestNeighbours(SEXP xSEXP, SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(nearestNeighbours(x, y)); return rcpp_result_gen; END_RCPP } // shullDeltri List shullDeltri(NumericVector x, NumericVector y); RcppExport SEXP interp_shullDeltri(SEXP xSEXP, SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(shullDeltri(x, y)); return rcpp_result_gen; END_RCPP } // triFind List triFind(int nT, NumericVector xT, NumericVector yT, IntegerVector i1, IntegerVector i2, IntegerVector i3, NumericVector x, NumericVector y); RcppExport SEXP interp_triFind(SEXP nTSEXP, SEXP xTSEXP, SEXP yTSEXP, SEXP i1SEXP, SEXP i2SEXP, SEXP i3SEXP, SEXP xSEXP, SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type nT(nTSEXP); Rcpp::traits::input_parameter< NumericVector >::type xT(xTSEXP); Rcpp::traits::input_parameter< NumericVector >::type yT(yTSEXP); Rcpp::traits::input_parameter< IntegerVector >::type i1(i1SEXP); Rcpp::traits::input_parameter< IntegerVector >::type i2(i2SEXP); Rcpp::traits::input_parameter< IntegerVector >::type i3(i3SEXP); 
Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(triFind(nT, xT, yT, i1, i2, i3, x, y)); return rcpp_result_gen; END_RCPP } // left LogicalVector left(double x1, double y1, double x2, double y2, NumericVector x0, NumericVector y0, double eps); RcppExport SEXP interp_left(SEXP x1SEXP, SEXP y1SEXP, SEXP x2SEXP, SEXP y2SEXP, SEXP x0SEXP, SEXP y0SEXP, SEXP epsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type x1(x1SEXP); Rcpp::traits::input_parameter< double >::type y1(y1SEXP); Rcpp::traits::input_parameter< double >::type x2(x2SEXP); Rcpp::traits::input_parameter< double >::type y2(y2SEXP); Rcpp::traits::input_parameter< NumericVector >::type x0(x0SEXP); Rcpp::traits::input_parameter< NumericVector >::type y0(y0SEXP); Rcpp::traits::input_parameter< double >::type eps(epsSEXP); rcpp_result_gen = Rcpp::wrap(left(x1, y1, x2, y2, x0, y0, eps)); return rcpp_result_gen; END_RCPP } // on LogicalVector on(double x1, double y1, double x2, double y2, NumericVector x0, NumericVector y0, double eps); RcppExport SEXP interp_on(SEXP x1SEXP, SEXP y1SEXP, SEXP x2SEXP, SEXP y2SEXP, SEXP x0SEXP, SEXP y0SEXP, SEXP epsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type x1(x1SEXP); Rcpp::traits::input_parameter< double >::type y1(y1SEXP); Rcpp::traits::input_parameter< double >::type x2(x2SEXP); Rcpp::traits::input_parameter< double >::type y2(y2SEXP); Rcpp::traits::input_parameter< NumericVector >::type x0(x0SEXP); Rcpp::traits::input_parameter< NumericVector >::type y0(y0SEXP); Rcpp::traits::input_parameter< double >::type eps(epsSEXP); rcpp_result_gen = Rcpp::wrap(on(x1, y1, x2, y2, x0, y0, eps)); return rcpp_result_gen; END_RCPP } // inHull LogicalVector inHull(List triObj, NumericVector x, NumericVector y, double eps); RcppExport SEXP interp_inHull(SEXP triObjSEXP, SEXP xSEXP, SEXP ySEXP, SEXP epsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type triObj(triObjSEXP); Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< double >::type eps(epsSEXP); rcpp_result_gen = Rcpp::wrap(inHull(triObj, x, y, eps)); return rcpp_result_gen; END_RCPP } // onHull LogicalVector onHull(List triObj, NumericVector x, NumericVector y, double eps); RcppExport SEXP interp_onHull(SEXP triObjSEXP, SEXP xSEXP, SEXP ySEXP, SEXP epsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type triObj(triObjSEXP); Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); Rcpp::traits::input_parameter< double >::type eps(epsSEXP); rcpp_result_gen = Rcpp::wrap(onHull(triObj, x, y, eps)); return rcpp_result_gen; END_RCPP } interp/R/0000755000176200001440000000000013605152164011757 5ustar liggesusersinterp/R/outer.convex.hull.R0000644000176200001440000000106413605104165015503 0ustar liggesusersouter.convhull<-function(cx,cy,px,py,FUN,duplicate="remove",...) 
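## Evaluate FUN on the grid spanned by cx and cy, but return NA at grid
## points falling outside the convex hull of the scattered points (px,py);
## the hull is taken from a Delaunay triangulation of (px,py).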
{ nx<-length(cx) ny<-length(cy) np<-length(px) if(length(py)!=np) stop("length of cx and cy differ") if (is.character(FUN)) FUN <- get(FUN, mode = "function", inherits = TRUE) p.tr<-tri.mesh(px,py,duplicate) ans<-matrix(FUN(matrix(cx, nx, ny), matrix(cy, nx, ny, byrow = TRUE), ...), nx, ny) ans[!in.convex.hull(p.tr,matrix(cx, nx, ny), matrix(cy, nx, ny, byrow = TRUE))]<-NA ans } interp/R/print.summary.voronoi.R0000644000176200001440000000025213605104165016421 0ustar liggesusersprint.summary.voronoi<-function(x,...) { cat("voronoi mosaic\n") cat("Call:", deparse(x$call),"\n") cat(x$nn, "nodes\n") cat(x$nd, "dummy nodes\n") } interp/R/voronoi.findrejectsites.R0000644000176200001440000000130213605104165016753 0ustar liggesusersvoronoi.findrejectsites <- function(voronoi.obj, xmin, xmax, ymin, ymax) { ## Given a voronoi object, find the reject sites, i.e. those sites ## with one of their vertices outside the bounded rectangle given by ## (xmin,ymin) and (xm ax,ymax). ## Return a vector `rejects': site N is a reject iff rejects[i] is T. nsites <- length(voronoi.obj$tri$x) rejects <- logical(nsites) outsiders <- ((voronoi.obj$x > xmax) | (voronoi.obj$x < xmin) | (voronoi.obj$y > ymax) | (voronoi.obj$y < ymin)) ## In the list below, each site could be rejected more than once. rejects[c(voronoi.obj$p1[outsiders], voronoi.obj$p2[outsiders], voronoi.obj$p3[outsiders])] <- TRUE; rejects } interp/R/print.summary.triSht.R0000644000176200001440000000042413605104165016204 0ustar liggesusersprint.summary.triSht<-function(x,...) { cat("triangulation:\n") cat("Call:", deparse(x$call),"\n") cat("number of nodes:",x$n,"\n") cat("number of arcs:",x$na,"\n") cat("number of boundary nodes:",x$nb,"\n") cat("number of triangles:",x$nt,"\n") } interp/R/triangles.R0000644000176200001440000000111613605104165014067 0ustar liggesuserstriangles<-function(tri.obj){ if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") ret<-tri.obj$trlist colnames(ret)<-c("node1","node2","node3","tr1","tr2","tr3","arc1","arc2","arc3") ret } arcs<-function(tri.obj){ if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") ret<-cbind(tri.obj$arcs[,"from"],tri.obj$arcs[,"to"]) colnames(ret)<-c("from","to") ret } area<-function(tri.obj){ if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") ret<-tri.obj$cclist[,"area"] ret } interp/R/plot.voronoi.polygons.R0000644000176200001440000000303513605104165016422 0ustar liggesusers"plot.voronoi.polygons" <- function(x,which, color=TRUE, isometric=TRUE,...){ lx <- length(x) if(missing(which)) which <- 1:lx ## exclude border polygons represented as NULL, ## intersect ensures working behaviour for eventuelly ## given argument "which" (otherwise 1:lx) which <- intersect(which,(1:lx)[!(unlist(lapply(x, is.null)))]) if(any(is.na(which))) stop("border polygons may not be choosen to plot") lw <- length(which) lmax <- function(x) apply(x,2,max) lmin <- function(x) apply(x,2,min) lmean <- function(x) apply(x,2,mean) xy.max <- apply(sapply(x[which],lmax),1,max) xy.min <- apply(sapply(x[which],lmin),1,min) xy.mean <- sapply(x[which],lmean) xlim=c(xy.min["x"]- 0.1*(xy.max["x"]-xy.min["x"]), xy.max["x"]+ 0.1*(xy.max["x"]-xy.min["x"])) ylim=c(xy.min["y"]- 0.1*(xy.max["y"]-xy.min["y"]), xy.max["y"]+ 0.1*(xy.max["y"]-xy.min["y"])) if(isometric){ xrange <- diff(xlim) yrange <- diff(ylim) maxrange <- max(xrange,yrange) midx <- sum(xlim)/2 midy <- sum(ylim)/2 xlim <- midx+(xlim-midx)/xrange*maxrange ylim <- midy+(ylim-midy)/yrange*maxrange } 
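  ## set up an empty plot over the common range, then fill each selected
  ## polygon with a heat.colors() shade and label it with its site index
  ## at the mean of its vertices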
plot(x[[which[1]]],type="n",xlim=xlim, ylim=ylim,...) colors <- heat.colors(lw) j <- 0 for(i in which){ j <- j+1 polygon(x[[i]],col=colors[j]) text(xy.mean[,j]["x"],xy.mean[,j]["y"],i) } title(paste("plot of",deparse(substitute(x)))) } interp/R/on.convex.hull.R0000644000176200001440000000047213605104165014763 0ustar liggesuserson.convex.hull<-function(tri.obj,x,y,eps=1E-16) { if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") if(length(x)!=length(y)) stop("x and y must be of same length") n<-length(x) if(n==0) stop("length of x (resp. y) is 0") onhull <- onHull(tri.obj,x,y,eps) onhull } interp/R/circles.R0000644000176200001440000000041613605104165013525 0ustar liggesuserscircles <- function(x,y,r,...){ n <- length(x) if(length(y)!=n || length(r)!=n) stop("arguments should be of same length!") phi <- seq(0,2*pi,length=360) for(i in 1:n){ lines(c(x[i]+r[i]*cos(phi),x[i]+r[i]),c(y[i]+r[i]*sin(phi),y[i]),type="l",...) } } interp/R/identify.triSht.R0000644000176200001440000000032613605104165015170 0ustar liggesusersidentify.triSht<-function(x,...) { if(!inherits(x,"triSht")) stop("x must be of class \"tri\"") labels<-paste("(",round(x$x,5),",",round(x$y,5),")", sep ="") identify(x$x,x$y,labels=labels) } interp/R/print.triSht.R0000644000176200001440000000072613605104165014515 0ustar liggesusersprint.triSht<-function(x,...) { if(!inherits(x,"triSht")) stop("x must be of class \"triSht\"") cat("Delauney triangulation, node and triangle indices:\n") cat("triangle: nodes (a,b,c), neighbour triangles [i,j,k] \n") for (i in 1:x$nt) { cat(i,": (",x$trlist[i,"i1"],",",x$trlist[i,"i2"],",",x$trlist[i,"i3"],"), [",x$trlist[i,"j1"],",",x$trlist[i,"j2"],",",x$trlist[i,"j3"],"]\n",sep="") } cat("boundary nodes: ", x$chull, "\n", sep=" ") } interp/R/voronoi.polygons.R0000644000176200001440000000071113605104165015443 0ustar liggesusers# from Denis White : voronoi.polygons <- function (voronoi.obj) { nsites <- length(voronoi.obj$tri$x) polys <- list() j <- 0 for (i in 1:nsites) { vs <- voronoi.findvertices(i, voronoi.obj) if (length(vs) > 0) { polys[[i]] <- cbind (x=voronoi.obj$x[vs], y=voronoi.obj$y[vs]) } else { polys[[i]] <- NULL } } class(polys)<-"voronoi.polygons" polys } interp/R/tri.mesh.R0000644000176200001440000000356313605104165013640 0ustar liggesuserstri.mesh <- function(x,y=NULL,duplicate="error"){ if(is.null(x)) stop("argument x missing.") if(is.null(y)){ x1<-x$x y1<-x$y if (is.null(x1) || is.null(y1)) stop("argument y missing and x contains no $x or $y component.") } else { x1<-x y1<-y } n <- length(x1) if(length(y1)!=n) stop("length of x and y differ.") ## handle duplicate points: xy <- paste(x1, y1, sep =",") i <- match(xy, xy) if(duplicate!="error") { if(duplicate!="remove" & duplicate!="error" & duplicate!="strip"){ stop("possible values for \'duplicate\' are \"error\", \"strip\" and \"remove\"") } else{ if(duplicate=="remove") ord <- !duplicated(xy) if(duplicate=="strip") ord <- (hist(i,plot=FALSE,freq=TRUE,breaks=seq(0.5,max(i)+0.5,1))$counts==1) x1 <- x1[ord] y1 <- y1[ord] n <- length(x1) } } else if(any(duplicated(xy))) stop("duplicate data points") ans <- shull.deltri(x1,y1) nt <- length(ans$i1) ## note: triangles are enumerated in c++ starting with 0, so add 1 here ## points are enumerated started with 1 tri.obj<-list(n=ans$n,x=ans$x,y=ans$y, nt=ans$nt, trlist=ans$trlist, cclist=ans$cclist, nchull=ans$nch, chull=ans$ch, narcs=ans$na, arcs=cbind(ans$a1,ans$a2), call=match.call()) colnames(tri.obj$trlist) <- c("i1","i2","i3","j1","j2","j3","k1","k2","k3") 
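  ## trlist columns: i1-i3 = indices of the triangle vertices,
  ## j1-j3 = indices of the neighbouring triangles (0 = no neighbour,
  ## i.e. a border edge), k1-k3 = indices of the corresponding arcs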
colnames(tri.obj$cclist) <- c("x","y","r","area","ratio") colnames(tri.obj$arcs) <- c("from","to") class(tri.obj)<-"triSht" invisible(tri.obj) } interp/R/plot.voronoi.R0000644000176200001440000000671613605104165014562 0ustar liggesusers"plot.voronoi" <- function(x,add=FALSE, xlim=c(min(x$tri$x)- 0.1*diff(range(x$tri$x)), max(x$tri$x)+ 0.1*diff(range(x$tri$x))), ylim=c(min(x$tri$y)- 0.1*diff(range(x$tri$y)), max(x$tri$y)+ 0.1*diff(range(x$tri$y))), all=FALSE, do.points=TRUE, main="Voronoi mosaic", sub=deparse(substitute(x)), isometric=TRUE, ...) { if(isometric){ if(!all(xlim==c(min(x$tri$x)- 0.1*diff(range(x$tri$x)), max(x$tri$x)+ 0.1*diff(range(x$tri$x)))) || !all(ylim==c(min(x$tri$y)- 0.1*diff(range(x$tri$y)), max(x$tri$y)+ 0.1*diff(range(x$tri$y))))){ warning("isometric option not used as xlim or ylim explicitly given") } else { if(all){ xlim=range(x$x) ylim=range(x$y) } else { xlim=range(x$tri$x) ylim=range(x$tri$y) } xrange <- diff(xlim) yrange <- diff(ylim) maxrange <- max(xrange,yrange) midx <- sum(xlim)/2 midy <- sum(ylim)/2 xlim <- midx+(xlim-midx)/xrange*maxrange ylim <- midy+(ylim-midy)/yrange*maxrange } } else { if(!all(xlim==c(min(x$tri$x)- 0.1*diff(range(x$tri$x)), max(x$tri$x)+ 0.1*diff(range(x$tri$x)))) || !all(ylim==c(min(x$tri$y)- 0.1*diff(range(x$tri$y)), max(x$tri$y)+ 0.1*diff(range(x$tri$y))))){ warning("all option not used as xlim or ylim explicitly given") } else { if(all) { xlim<-c(min(x$x)-0.1*diff(range(x$x)), max(x$x)+0.1*diff(range(x$x))) ylim<-c(min(x$y)-0.1*diff(range(x$y)), max(x$y)+0.1*diff(range(x$y))) } } } n<-length(x$x) if(!add) { plot.new() plot.window(xlim=xlim,ylim=ylim,"") } if(do.points) points(x$x,x$y,...) for (i in 1:n) { if(x$node[i]) ## Triangle i has positive area. ## Connect circumcircle center of triangle i with neighbours: { ## Find neighbour triangles tns<-sort(c(x$n1[i],x$n2[i],x$n3[i])) for(j in 1:3) { ## Connect (if triangle exists and has positive area). if(tns[j]>0) { ## simple node if(x$node[tns[j]]) lines(c(x$x[i],x$x[tns[j]]), c(x$y[i],x$y[tns[j]]),...) } else if(tns[j]<0){ ## dummy node lines(c(x$x[i],x$dummy.x[-tns[j]]), c(x$y[i],x$dummy.y[-tns[j]]), lty="dashed",...) 
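              ## a dashed segment marks an unbounded Voronoi edge, truncated
              ## at an artificial dummy node created by voronoi.mosaic()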
} } } } if(!add) title(main = main, sub =sub) } interp/R/interp.R0000644000176200001440000001142313605104165013402 0ustar liggesusersinterpp <- function(x, y=NULL, z, xo, yo=NULL, linear = TRUE, extrap = FALSE, duplicate = "error", dupfun = NULL, deltri = "shull"){ interp(x,y,z,xo,yo,linear,extrap,duplicate,dupfun,deltri, input="points", output="points") } interp <- function(x, y=NULL, z, xo = seq(min(x), max(x), length = nx), yo = seq(min(y), max(y), length = ny), linear = (method=="linear"), extrap = FALSE, duplicate = "error", dupfun = NULL, nx=40, ny=40, input="points", output = "grid", method="linear", deltri="shull") { if(method=="linear") linear <- TRUE ## handle sp data, save coordinate and value names is.sp <- FALSE sp.coord <- NULL sp.z <- NULL sp.proj4string <- NULL if(is.null(y)&&is.character(z)){ if(class(x)=="SpatialPointsDataFrame" && requireNamespace("sp", quietly=TRUE)) { sp.coord <- dimnames(sp::coordinates(x))[[2]] sp.z <- z sp.proj4string <- x@proj4string z <- x@data[,z] y <- sp::coordinates(x)[,2] x <- sp::coordinates(x)[,1] is.sp <- TRUE xo = seq(min(x), max(x), length = nx) yo = seq(min(y), max(y), length = ny) } else stop("either x,y,z are numerical or x is SpatialPointsDataFrame and z a name of a data column in x") } if(!(all(is.finite(x)) && all(is.finite(y)) && all(is.finite(z)))) stop("missing values and Infs not allowed") drx <- diff(range(x)) dry <- diff(range(y)) if(drx == 0 || dry == 0) stop("all data collinear") # other cases caught in Fortran code if(drx/dry > 10000 || drx/dry < 0.0001) stop("scales of x and y are too dissimilar") n <- length(x) nx <- length(xo) ny <- length(yo) if(length(y) != n || length(z) != n) stop("Lengths of x, y, and z do not match") dups_found <- isTRUE(anyDuplicated(cbind(x, y), MARGIN=1) != 0L) if (dups_found) { if(duplicate == "error") { stop("duplicate data points: need to set 'duplicate = ..' 
") } else { ## duplicate != "error" xy <- paste(x, y, sep = ",") # trick for 'duplicated' (x,y)-pairs i <- match(xy, xy) if(duplicate == "user") dupfun <- match.fun(dupfun)#> error if it fails ord <- !duplicated(xy) if(duplicate != "strip") { centre <- function(x) switch(duplicate, mean = mean(x), median = median(x), user = dupfun(x)) z <- unlist(lapply(split(z,i), centre)) } else { z <- z[ord] } x <- x[ord] y <- y[ord] n <- length(x) } } if(method=="linear"|method=="akima"){ if(!linear) stop("method=\"akima\" (linear=FALSE) is currently under developement and not yet available!") if(deltri=="deldir"){ if(!linear) stop("method=\"akima\" (linear=FALSE) is not implemented for deltri=\"deldir\"!") triangles <- triang.list(deldir(x=x,y=y,z=z)) ans <- interpDeltri(xo,yo,z,triangles,input,output) } else if(deltri=="shull"){ ans <- interpShull(xo,yo,x,y,z,linear,input,output) if(output=="points") # back to vector friom matrix: ans$z <- c(ans$z) } else stop(paste("unknown triangulation method", deltri)) } else stop(paste("method=\"",method,"\" not implemented!",sep="")) ## prepare return value if (is.sp && requireNamespace("sp", quietly=TRUE)) { zm <- nx zn <- ny zvec <- c(ans$z) xvec <- c(matrix(rep(ans$x,zn),nrow=zm,ncol=zn,byrow=FALSE)) yvec <- c(matrix(rep(ans$y,zm),nrow=zm,ncol=zn,byrow=TRUE)) nona <- !is.na(zvec) ret <- data.frame(xvec[nona],yvec[nona],zvec[nona]) names(ret) <- c(sp.coord[1],sp.coord[2],sp.z) sp::coordinates(ret) <- sp.coord ret@proj4string <- sp.proj4string sp::gridded(ret) <- TRUE } else { if(output=="grid") ret <- list(x=ans$x,y=ans$y,z=matrix(ans$z,nx,ny)) else ret <- list(x=ans$x,y=ans$y,z=ans$z) } ret } interp/R/plot.triSht.R0000644000176200001440000000312013605104165014326 0ustar liggesusers plot.triSht<-function(x,add=FALSE,xlim=range(x$x), ylim=range(x$y),do.points=TRUE, do.labels=FALSE, isometric=TRUE, do.circumcircles=FALSE, segment.lty="dashed", circle.lty="dotted", ...) { if(!inherits(x,"triSht")) stop("x must be of class \"triSht\"") if(isometric){ xlim=range(x$x) ylim=range(x$y) xrange <- diff(xlim) yrange <- diff(ylim) maxrange <- max(xrange,yrange) midx <- sum(xlim)/2 midy <- sum(ylim)/2 xlim <- midx+(xlim-midx)/xrange*maxrange ylim <- midy+(ylim-midy)/yrange*maxrange } if(!add) plot(x$x,x$y,type="n", xlim=xlim, ylim=ylim) if(do.points) points(x$x,x$y) segments(x$x[x$arcs[,"from"]],x$y[x$arcs[,"from"]], x$x[x$arcs[,"to"]],x$y[x$arcs[,"to"]], lty=segment.lty, ...) if(do.labels){ midsx <- 1/3*(x$x[x$trlist[,1]]+x$x[x$trlist[,2]]+ x$x[x$trlist[,3]]) midsy <- 1/3*(x$y[x$trlist[,1]]+x$y[x$trlist[,2]]+ x$y[x$trlist[,3]]) text(midsx,midsy,1:x$nt,...) text(x$x+0.025*diff(xlim),x$y+0.025*diff(ylim),1:x$n,font=4, ...) arcmidsx <- 1/2*(x$x[x$arcs[,"from"]]+x$x[x$arcs[,"to"]]) arcmidsy <- 1/2*(x$y[x$arcs[,"from"]]+x$y[x$arcs[,"to"]]) text(arcmidsx+0.025*diff(xlim),arcmidsy+0.025*diff(ylim), 1:x$narcs,font=3,...) } if(do.circumcircles) circles(x$cclist[,"x"],x$cclist[,"y"],x$cclist[,"r"], lty=circle.lty, ...) } interp/R/voronoi.area.R0000644000176200001440000000662113605104165014507 0ustar liggesusersvoronoi.area <- function(voronoi.obj) { ## Compute the area of each Voronoi polygon. ## If the area of a polygon cannot be computed, NA is returned. ## ## TODO: currently, the list of Voronoi vertices (vs) of each site ## is found, but then discarded. They could be reused for other ## calls? 
nsites <- length(voronoi.obj$tri$x) areas <- double(nsites) for (i in 1:nsites) { vs <- voronoi.findvertices(i, voronoi.obj) if (length(vs) > 0) { areas[i] <- voronoi.polyarea( voronoi.obj$x[vs], voronoi.obj$y[vs]) } else { areas[i] <- NA } } areas } voronoi.findvertices <- function(site, vor) { ## Helper function. ## Return the ordered list of Voronoi vertices for site number SITE ## in the Voronoi tesselation. p <- cbind(vor$p1, vor$p2, vor$p3) a <- which(p == site, arr.ind=TRUE) vertices <- a[,1] #list of the vertice indexes. triples <- p[a[,1],] triples ## Now remove the entries that are not site. ## Need to take transpose, as `which' runs down by column, rather ## than by row, and we want to keep rows together. triples <- t(triples) pairs <- triples[ which (triples!= site)] m <- matrix(pairs, ncol=2, byrow=TRUE) ## Now go through the list of sites and order the vertices. We ## build up the list of vertices in the vector `orderedvs'. This ## vector is truncated to the exact size at the end of the function. ## To order the vertices of the Voronoi polygon associated with a ## site, we first find all vertices that are associated with a site. ## These will come in threes, from the array `triples'. We then ## remove the site number itself from the triples to come up with a ## list of pairs. e.g. trying to find the vertices for site 6: ## sites v number ## 3 9 6 6 ## 6 4 3 2 ## 9 6 7 3 ## 6 7 4 9 ## ## remove the `6': ## sites v number ## 3 9 6 ## 4 3 2 ## 9 7 3 ## 7 4 9 ## and then starting with site 3, we find each subsequent site. ## i.e. 3 then 9 (output v 6), then 7 (output v 3), then 4 (output v ## 9) then 3 (output v 2). We are now back to the starting site so ## the ordered list of vertices is 6, 3, 9, 2. orderedvs <- integer(30); vnum <- 1 orderedvs[vnum] <- vertices[1]; vnum <- 1 + vnum firstv <- m[1,1]; nextv <- m[1,2]; m[1,] <- -1; #blank 1st row out. looking <- TRUE while (looking) { ##cat(paste("looking for ", nextv, "\n")) t <- which(m == nextv, arr.ind=TRUE) if (length(t) == 0) { #could check length(t) != 1 ## cannot compute area... vnum <- 1; looking <- FALSE } else { t.row <- t[1,1] t.col <- t[1,2] orderedvs[vnum] <- vertices[t.row]; vnum <- 1 + vnum othercol <- (3 - t.col) #switch 1 to 2 and vice-versa. nextv <- m[ t.row, othercol] m[t.row,] <- -1 #blank this row out. if (nextv == firstv) looking <- FALSE } } orderedvs[1:vnum-1] #truncate vector to exact length. } voronoi.polyarea <- function (x, y) { ## Return the area of the polygon given by the points (x[i], y[i]). ## Absolute value taken in case coordinates are clockwise. ## Taken from the Octave implementation. ## Helper function. 
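  ## shoelace formula: area = |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)| / 2,
  ## indices taken cyclically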
r <- length(x) p <- matrix(c(x, y), ncol=2, nrow=r) p2 <- matrix( c(y[2:r], y[1], -x[2:r], -x[1]), ncol=2, nrow=r) a <- abs(sum (p * p2 ) / 2) } interp/R/voronoi.mosaic.R0000644000176200001440000001450613605104165015053 0ustar liggesusers"voronoi.mosaic" <- function(x,y=NULL,duplicate="error") { dummy.node<-function(x0,y0,x1,y1,x2,y2,d) { # determine a direction orthogonal to p1--p2 # # p_1 # | # |d # p_0 ------>+ - - - - -> dummy_node # r | # V # p_2-------> # n # two versions, r and n # dx<- x2-x1 dy<- y2-y1 nx<- -dy ny<- dx rx<-(x1+x2)/2-x0 ry<-(y1+y2)/2-y0 lr<-sqrt(rx^2+ry^2) ln<-sqrt(nx^2+ny^2) # choose the numerically better version if(lr > ln) { vx<-rx/lr vy<-ry/lr if(in.convex.hull(ret$tri,x0,y0)) d <- d else d <- -d } else { vx<-nx/ln vy<-ny/ln eps<-1e-7 if(in.convex.hull(ret$tri,(x1+x2)/2+eps*vx,(y1+y2)/2+eps*vy)) d <- - d else d <- d } list(x=x0+d*vx,y=y0+d*vy) } if(class(x)=="tri"){ if(!is.null(x$tlist)) stop("this \"tri\" object has been created with tripack::tri.mesh,\n recreate it with interp::tri.mesh!\n The $call element gives a hint how it was created.") tri.obj <- x } else tri.obj<-tri.mesh(x=x,y=y,duplicate=duplicate) nt<-tri.obj$nt ret<-list(x=tri.obj$cclist[,"x"], y=tri.obj$cclist[,"y"], node=(tri.obj$cclist[,"area"]>0), area=tri.obj$cclist[,"area"], ratio=tri.obj$cclist[,"ratio"], radius=tri.obj$cclist[,"r"], n1=tri.obj$trlist[,"j1"], n2=tri.obj$trlist[,"j2"], n3=tri.obj$trlist[,"j3"], p1=tri.obj$trlist[,"i1"], p2=tri.obj$trlist[,"i2"], p3=tri.obj$trlist[,"i3"], tri=tri.obj) ret$dummy.x<-integer(0) ret$dummy.y<-integer(0) dummy.cnt<-0 dmax<-max(diff(range(ret$x)),diff(range(ret$y))) n<-length(ret$x) # add dummy nodes on the border of the triangulation for (i in 1:n) { if(ret$node[i]) # Triangle i has positive area. { # Find neighbour triangles tns<-sort(c(ret$n1[i],ret$n2[i],ret$n3[i])) ins <- order(c(ret$n1[i],ret$n2[i],ret$n3[i])) tn1<-tns[1] tn2<-tns[2] tn3<-tns[3] # Handle special cases on the border: # (This should better be done in the FORTRAN code!) if(any(tns==0)) { if(tns[2]!=0) { # Only one edge of i coincides with border. # Determine nodes of triangle i tr<-c(ret$p1[i],ret$p2[i],ret$p3[i]) # Which of these nodes are border nodes (2)? ns<-tr[on.convex.hull(ret$tri, ret$tri$x[tr], ret$tri$y[tr])] if(length(ns)==2) { # 2 points on hull i1<-ns[1] i2<-ns[2] # Find a dummy node pn<-dummy.node(ret$x[i],ret$y[i], ret$tri$x[i1],ret$tri$y[i1], ret$tri$x[i2],ret$tri$y[i2], dmax) dummy.cnt<- dummy.cnt+1 ret$dummy.x[dummy.cnt]<-pn$x ret$dummy.y[dummy.cnt]<-pn$y # update neighbour relation # (negative index indicates dummy node) if(ret$n1[i]==0) ret$n1[i]<- -dummy.cnt if(ret$n2[i]==0) ret$n2[i]<- -dummy.cnt if(ret$n3[i]==0) ret$n3[i]<- -dummy.cnt } # Other cases: # 1 point on hull -- should not happen at all # 3 points on hull -- should not happen here # see "else" tree } else { # Two edges of i coincide with border. 
# (= 3 points on hull ) # that means this triangle forms one corner of # the convex hull # Find out which edge of triangle i is not # on the border: (check if midpoints of edges lay # on hull) tr<-c(ret$p1[i],ret$p2[i],ret$p3[i]) edge<-list(from=tr[c(1,2,3)],to=tr[c(2,3,1)]) mx <- (ret$tri$x[edge$from]+ret$tri$x[edge$to])/2 my <- (ret$tri$y[edge$from]+ret$tri$y[edge$to])/2 eonb <- on.convex.hull(ret$tri,mx,my) # Find two dummy nodes for (id in 1:3){ if (eonb[id]){ pn<-dummy.node(ret$x[i],ret$y[i], ret$tri$x[edge$from[id]], ret$tri$y[edge$from[id]], ret$tri$x[edge$to[id]], ret$tri$y[edge$to[id]], dmax) dummy.cnt<- dummy.cnt+1 ret$dummy.x[dummy.cnt]<-pn$x ret$dummy.y[dummy.cnt]<-pn$y # update neighbour relation # (negative index indicates dummy node) if(ret$n1[i]==0) ret$n1[i]<- -dummy.cnt else if(ret$n2[i]==0) ret$n2[i]<- -dummy.cnt else if(ret$n3[i]==0) ret$n3[i]<- -dummy.cnt } } } } } else { # A triangle i with area 0: # This can't happen on the border (already removed in FORTRAN code!). # Do nothing. tmp<-0 } } ret$call <- match.call() class(ret) <- "voronoi" ret } interp/R/RcppExports.R0000644000176200001440000000364713605151405014402 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 interpDeltri <- function(x, y, zD, t, input = "points", output = "grid") { .Call('interp_interpDeltri', PACKAGE = 'interp', x, y, zD, t, input, output) } interpShull <- function(x, y, xD, yD, zD, linear = TRUE, input = "points", output = "grid") { .Call('interp_interpShull', PACKAGE = 'interp', x, y, xD, yD, zD, linear, input, output) } locpoly.partderiv.grid <- function(x, y, xD, yD, zD, kernel = "gaussian", h = as.numeric( c(0.25,0.25)), solver = "QR", degree = 3L, smoothpde = FALSE, akimaweight = FALSE, nweight = 25L) { .Call('interp_partDerivGrid', PACKAGE = 'interp', x, y, xD, yD, zD, kernel, h, solver, degree, smoothpde, akimaweight, nweight) } locpoly.partderiv.points <- function(x, y, xD, yD, zD, kernel = "gaussian", h = as.numeric( c(0.25,0.25)), solver = "QR", degree = 3L, smoothpde = FALSE, akimaweight = FALSE, nweight = 25L) { .Call('interp_partDerivPoints', PACKAGE = 'interp', x, y, xD, yD, zD, kernel, h, solver, degree, smoothpde, akimaweight, nweight) } nearest.neighbours <- function(x, y) { .Call('interp_nearestNeighbours', PACKAGE = 'interp', x, y) } shull.deltri <- function(x, y) { .Call('interp_shullDeltri', PACKAGE = 'interp', x, y) } triFind <- function(nT, xT, yT, i1, i2, i3, x, y) { .Call('interp_triFind', PACKAGE = 'interp', nT, xT, yT, i1, i2, i3, x, y) } left <- function(x1, y1, x2, y2, x0, y0, eps = 1E-16) { .Call('interp_left', PACKAGE = 'interp', x1, y1, x2, y2, x0, y0, eps) } on <- function(x1, y1, x2, y2, x0, y0, eps = 1E-16) { .Call('interp_on', PACKAGE = 'interp', x1, y1, x2, y2, x0, y0, eps) } inHull <- function(triObj, x, y, eps = 1E-16) { .Call('interp_inHull', PACKAGE = 'interp', triObj, x, y, eps) } onHull <- function(triObj, x, y, eps = 1E-16) { .Call('interp_onHull', PACKAGE = 'interp', triObj, x, y, eps) } interp/R/print.voronoi.R0000644000176200001440000000076113605104165014732 0ustar liggesusersprint.voronoi<-function(x,...) 
{ if(!inherits(x,"voronoi")) stop("x must be of class \"voronoi\"") cat("voronoi mosaic:\n") cat("nodes: (x,y): neighbours (<0: dummy node)\n") for (i in 1:length(x$x)) { if(x$node[i]){ cat(i,": (",x$x[i],",",x$y[i],")",sep="") cat(":",x$n1[i],x$n2[i],x$n3[i],"\n",sep=" ") } } cat("dummy nodes: (x,y)\n") for (i in 1:length(x$dummy.x)) { cat(i,": (",x$dummy.x[i],",",x$dummy.y[i],")\n",sep="") } } interp/R/franke.R0000644000176200001440000000127113605104165013347 0ustar liggesusersfranke.fn <- function(x,y,fn=1){ switch(fn, "1"=0.75*exp(-((9*x-2)^2+(9*y-2)^2)/4)+ 0.75*exp(-((9*x+1)^2)/49-(9*y+1)/10)+ 0.5*exp(-((9*x-7)^2+(9*y-3)^2)/4)- 0.2*exp(-(9*x-4)^2-(9*y-7)^2), "2"=(tanh(9*y-9*x)+1)/9, "3"=(1.25+cos(5.4*y))/(6*(1+(3*x-1)^2)), "4"=exp(-81*((x-0.5)^2+(y-0.5)^2)/16)/3, "5"=exp(-81*((x-0.5)^2+(y-0.5)^2)/4)/3, "6"=sqrt(64-81*((x-0.5)^2+(y-0.5)^2))/9-0.5) } franke.data <- function(fn=1,ds=1,data){ ret <- cbind(x=data[[ds]]$x,y=data[[ds]]$y, z=franke.fn(data[[ds]]$x,y=data[[ds]]$y,fn)) list(x=ret[,"x"],y=ret[,"y"],z=ret[,"z"]) } interp/R/convex.hull.R0000644000176200001440000000075413605104165014353 0ustar liggesusersconvex.hull<-function(tri.obj, plot.it=FALSE, add=FALSE,...) { if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") ret<-list(x=tri.obj$x[tri.obj$chull], y=tri.obj$y[tri.obj$chull], i=tri.obj$chull) if(plot.it) { if (!add) { plot.new() plot.window(range(ret$x), range(ret$y), "") } lines(cbind(ret$x,ret$x[1]),cbind(ret$y,ret$y[1]), ...) invisible(ret) } else ret } interp/R/summary.triSht.R0000644000176200001440000000045313605104165015053 0ustar liggesuserssummary.triSht<-function(object, ...) { if(!inherits(object,"triSht")) stop("object must be of class \"triSht\"") ans<-list(n=object$n, na=object$narcs, nb=object$nchull, nt=object$nt, call=object$call) class(ans)<-"summary.triSht" ans } interp/R/tri.find.R0000644000176200001440000000043713605104165013621 0ustar liggesuserstri.find<-function(tri.obj,x,y) { if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") ans <- triFind(tri.obj$nt, tri.obj$x, tri.obj$y, tri.obj$trlist[,"i1"], tri.obj$trlist[,"i2"], tri.obj$trlist[,"i3"], x,y) ans } interp/R/summary.voronoi.R0000644000176200001440000000041013605104165015262 0ustar liggesuserssummary.voronoi<-function(object,...) { if(!inherits(object,"voronoi")) stop("object must be of class \"voronoi\"") ans<-list(nn=length(object$x), nd=length(object$dummy.x), call=object$call) class(ans)<-"summary.voronoi" ans } interp/R/locpoly.R0000644000176200001440000001147113605104165013565 0ustar liggesuserslocpoly <- function(x, y, z, xo = seq(min(x), max(x), length = nx), yo = seq(min(y), max(y), length = ny), nx = 40, ny = 40, input = "points", output = "grid", h = 0, kernel = "uniform", solver = "QR", degree = 3, pd = ""){ ## secondary use of the partial derivatives estimate for Akimas splines: ## use them directly grid- or pointwise. 
if(!(output %in% c("grid","points"))){ stop("unknown value for \"output\"!") } if(!(input %in% c("grid","points"))){ stop("unknown value for \"output\"!") } if(input=="grid"){ nx <- length(x) ny <- length(y) if(dim(z)[1]!=nx | dim(z)[2]!=ny) stop("wrong dimensions of x, y, and z!") x <- matrix(rep(x,ny),nx,ny) y <- t(matrix(rep(y,nx),ny,nx)) } if(pd=="all"){ if(output=="grid"){ ans <- locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree) ans$x=xo ans$y=yo } else { ans <- locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree) ans$x=xo ans$y=yo } } else if(pd==""){ if(output=="grid") ans <- list(x=xo,y=yo,z=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$z) else ans <- list(x=xo,y=yo,z=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$z) } else if(pd=="x"){ if(degree>0){ if(output=="grid") ans <- list(x=xo,y=yo,zx=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zx) else ans <- list(x=xo,y=yo,zx=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zx) } else stop("need degree>0 for pd=\"x\"") } else if(pd=="y"){ if(degree>0){ if(output=="grid") ans <- list(x=xo,y=yo,zy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zy) else ans <- list(x=xo,y=yo,zy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zy) } else stop("need degree>0 for pd=\"y\"") } else if(pd=="xx"){ if(degree>1){ if(output=="grid") ans <- list(x=xo,y=yo,zxx=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zxx) else ans <- list(x=xo,y=yo,zxx=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zxx) } else stop("need degree>1 for pd=\"xx\"") } else if(pd=="yy"){ if(degree>1){ if(output=="grid") ans <- list(x=xo,y=yo,zyy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zyy) else ans <- list(x=xo,y=yo,zyy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zyy) } else stop("need degree>1 for pd=\"yy\"") } else if(pd=="xy"){ if(degree>1){ if(output=="grid") ans <- list(x=xo,y=yo,zxy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zxy) else ans <- list(x=xo,y=yo,zxy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zxy) } else stop("need degree>1 for pd=\"xy\"") } else if(pd=="xxx"){ if(degree>2){ if(output=="grid") ans <- list(x=xo,y=yo,zxxx=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zxxx) else ans <- list(x=xo,y=yo,zxxx=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zxxx) } else stop("need degree>2 for pd=\"xxx\"") } else if(pd=="yyy"){ if(degree>2){ if(output=="grid") ans <- list(x=xo,y=yo,zyyy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zyyy) else ans <- list(x=xo,y=yo,zyyy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zyyy) } else stop("need degree>2 for pd=\"yyy\"") } else if(pd=="xxy"){ if(degree>2){ if(output=="grid") ans <- list(x=xo,y=yo,zxxy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zxxy) else ans <- list(x=xo,y=yo,zxxy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zxxy) } else stop("need degree>2 for pd=\"xxy\"") } else if(pd=="xyy"){ if(degree>2){ if(output=="grid") ans <- list(x=xo,y=yo,zxyy=locpoly.partderiv.grid(xo,yo,x,y,z,kernel,h,solver,degree)$zxyy) else ans <- list(x=xo,y=yo,zxyy=locpoly.partderiv.points(xo,yo,x,y,z,kernel,h,solver,degree)$zxyy) } else stop("need degree>2 for pd=\"xyy\"") } else stop(paste("unsupported value for pd: ", pd, "\nonly partial derivatives of order up to 3 can be estimated (needs degree=3)!")) ans } 
interp/R/in.convex.hull.R0000644000176200001440000000061013605152164014751 0ustar liggesusers in.convex.hull<-function(tri.obj,x,y,eps=1E-16,strict=TRUE) { if(!inherits(tri.obj,"triSht")) stop("tri.obj must be of class \"triSht\"") if(length(x)!=length(y)) stop("x and y must be of same length") n<-length(x) if(n==0) stop("length of x (resp. y) is 0") inhull <- inHull(tri.obj,x,y,eps) if(!strict) inhull <- inhull | onHull(tri.obj,x,y,eps) inhull } interp/MD50000644000176200001440000000724313605457471012105 0ustar liggesuserse2a3857ed2433b8a8fe6cc23a74c7852 *ChangeLog f77092a27c8a24e02b36546bcbcc6774 *DESCRIPTION c229e70a6b911b54cfcb08dd8f05077f *NAMESPACE d836fdae3160d04afe7e7a9faa42e167 *R/RcppExports.R 9366104c7af32ceb91f8057f5a8ae994 *R/circles.R 43d0e424d86ed1e7d650b9bbb2869dc0 *R/convex.hull.R 120c6e6c07d4a97e1d3cf159328e3120 *R/franke.R 6da3b8ed056ddb8eb682c056ebfcfa43 *R/identify.triSht.R c4875927ee0ae3bd24ac92dcb7d6f2dd *R/in.convex.hull.R f8ab1118c742eef6fd740bbf7f4f0073 *R/interp.R 9833f5522f2e5202b2d689d6407acfb1 *R/locpoly.R 6a98a58847c9310f1367c87b52e9d0c5 *R/on.convex.hull.R 3ce539dcf54744edded619a2beab96ce *R/outer.convex.hull.R dd03dca8395dff9ab94e71cbcc594ce6 *R/plot.triSht.R 499b778135539d8360f8e8d0299ee0bc *R/plot.voronoi.R c88829a91eb96202cd2235eb4e885074 *R/plot.voronoi.polygons.R a2b17f0993309151115b18d1c7325159 *R/print.summary.triSht.R 3bc54bdc34cdaf0478d68e8636ba6bda *R/print.summary.voronoi.R 2794c2f770e3b837310d4e622f3eb09a *R/print.triSht.R fc70baa8e7e0469733345b3a491f3f87 *R/print.voronoi.R 12c5dd1ed119ebef483ef506e830d61a *R/summary.triSht.R 2b81fe1ab78035e35594e127c7ac4f02 *R/summary.voronoi.R 8e509a7f5f682bf8059fc0f063bf9c39 *R/tri.find.R 0aa4f93ca29eeebdc428e2c3996785b3 *R/tri.mesh.R c62d100fb0ce52349152cc2a92761d47 *R/triangles.R fb9249df5cd29bdc56eae7f32a8c0eed *R/voronoi.area.R 38d760fef74d77d7c357d28103e8b682 *R/voronoi.findrejectsites.R e7adb787c0c8ba474d5b1150ee15e298 *R/voronoi.mosaic.R c4121448ccfe6336aa3e5af236015eb5 *R/voronoi.polygons.R 0c7f664a0a380d50654b4c4cff598830 *README 91bf0772cf7358446293ab6445fe6b72 *data/franke.rda f735aee020bdf2df7fa80d0f6f5ce932 *man/arcs.Rd 9773f4d5ac4c71bd61d1ec47b17da426 *man/area.Rd 7e74c1f6293eace65e8f4bc15427ca24 *man/circles.Rd 0512ee144ce59c58a831f10e5dcba3f8 *man/convex.hull.Rd dc7e61939b31acce1e4ad460eeaad407 *man/franke.data.Rd ac912bc7eb77ad9de41acb8bc5ac53ee *man/identify.tri.Rd 0b1e7b35a1b64aced1a9ca4f4f22bb06 *man/interp-package.Rd 167c3351479b0fc0c19b985009253003 *man/interp.Rd 5794d1c25b5a19c02d413516138344fb *man/interpp.Rd 13d42d063c835ffc669eea36f0f0a67f *man/locpoly.Rd f51e4208d047074637cf5c2a0e2891af *man/nearest.neighbours.Rd 99f41c8ed9e06f53eab3bdb4ebbe93aa *man/on.Rd d681503a4707c54da35d76844c4ece12 *man/on.convex.hull.Rd f12752daa55efd48cb12aa3ae49fae11 *man/outer.convhull.Rd faeade4dc43d56685d3badfbc134046a *man/plot.triSht.Rd df8b271b5dc86553b8b502447a3260ac *man/plot.voronoi.Rd d25f498cbf1b3246dcbfdf18bcba6460 *man/plot.voronoi.polygons.Rd aa7badd271063cb2d27daa806e0739a3 *man/print.summary.triSht.Rd 2757c161ced4fd601e118b16ce6a440e *man/print.summary.voronoi.Rd 93b0f84796f548e5f96096acc0d73862 *man/print.triSht.Rd 6395f75f2d90eebf3bcd19a6f601ff69 *man/print.voronoi.Rd 1668800ee1cdbd4f4eb4ecc88fe5f463 *man/summary.triSht.Rd d181e29a31fa093105df6819e639d666 *man/summary.voronoi.Rd e662b7d280cf566cf07d657ca8d5ce91 *man/tri.find.Rd 66575cccac77ec7f9ffe0e269f7606db *man/tri.mesh.Rd 32d4c1e1e1e0ccea67bf2359cd46f3dc *man/triSht.Rd 66896cb15230471ef82e28acc91109dd *man/triangles.Rd 
fc81e7afc72a5c21b8e0bf36debc7a54 *man/voronoi.Rd 7681e4f9326438ca97f380192d37d008 *man/voronoi.area.Rd a9ed1de2f01c84de3156ed8d15e84806 *man/voronoi.findrejectsites.Rd be4edf843d59c15c40dcea1800f3ce3f *man/voronoi.mosaic.Rd ee36cf86ad5120f0fc6c85e1c1fd2bea *man/voronoi.polygons.Rd 8083990cf5d0bd3645d398f4c115eece *src/RcppExports.cpp 8fd2f228cdce672dc5301f09f795b447 *src/common.cpp 90dccc6c3a7d2bb198428c7774a64a4a *src/init.c ab5107215431b95a975fceb6a45852fc *src/interp.cpp 2b728c6c0c01099dc5f68ea698ca70fa *src/interp.h e0eb7eead013b07662e0312d15896a2b *src/partDeriv.cpp fb383de33533f716db1edd9e67690ba5 *src/s_hull_pro.cpp 4d783a74d7ee04ea2161d28a537ab2ce *src/s_hull_pro.h d043e562f24c428884afd53b59d3aed7 *src/shullDeltri.cpp
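A short end-to-end sketch of the tripack-style functions collected above
(illustration only, not part of the package sources; it assumes the bundled
franke data set can be loaded with data(franke)): triangulate one of Franke's
point sets, draw the mesh and its convex hull, then derive the Voronoi mosaic
and the cell areas.

## illustration only
library(interp)
data(franke)
fd <- franke.data(fn = 1, ds = 1, franke)   # first Franke test data set
tr <- tri.mesh(fd$x, fd$y)                  # s-hull Delaunay triangulation
plot(tr)
convex.hull(tr, plot.it = TRUE, add = TRUE, col = "red")
vm <- voronoi.mosaic(fd$x, fd$y)            # Voronoi mosaic of the same points
plot(vm, add = TRUE, col = "blue")
voronoi.area(vm)                            # NA for unbounded (border) cells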