This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import arcpy | |
from arcpy.sa import * | |
arcpy.CheckOutExtension("Spatial") | |
import numpy as np | |
import pandas as pd | |
##################################################################################################################### | |
# Fragment (truncated): presumably joins per-catchment values from a CSV onto a
# template raster keyed by idName (COMID) using arcpy/Spatial Analyst (imported
# above) — TODO confirm; the docstring opens here and the body is cut off in
# this view, so the actual behavior cannot be verified from this page.
def catcsv2raster(inCSV, Value, inTemplate, outRaster, dtype='Int', idName='COMID'): | |
''' |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): list StreamCat "States" tables on the EPA FTP ---
# Fetches the FTP directory listing with RCurl and splits it into one entry per
# file; the filtering/download steps are cut off in this view. Trailing "| |"
# tokens are page-scrape artifacts, not R code.
#FTP location | |
ftpdir <- 'ftp://newftp.epa.gov/EPADataCommons/ORD/NHDPlusLandscapeAttributes/StreamCat/States/' | |
#Your output directory | |
out_dir <- 'D:/Lixo/' | |
#Desired table (change to name of desired table) | |
tables <- c('NLCD2001RipBuf100_CA','Kffact_CA','ImperviousSurfaces2001_CA') | |
#Get URL, split returned list, select out only desired tables by name ('table' above) | |
library(RCurl) | |
url_list <- getURL(ftpdir, dirlistonly = TRUE) | |
url_list <- strsplit(url_list, split = '\r\n')[[1]] |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): list StreamCat "HydroRegions" tables on the EPA FTP ---
# Same pattern as the States snippet above, but filters the directory listing to
# entries whose name matches `table`; the download step is cut off in this view.
# Trailing "| |" tokens are page-scrape artifacts, not R code.
#FTP location | |
ftpdir <- 'ftp://newftp.epa.gov/EPADataCommons/ORD/NHDPlusLandscapeAttributes/StreamCat/HydroRegions/' | |
#Desired table (change to name of desired table) | |
table <- 'PredictedBioCondition' | |
#Get URL, split returned list, select out only desired tables by name ('table' above) | |
library(RCurl) | |
url_list <- getURL(ftpdir, dirlistonly = TRUE) | |
url_list <- strsplit(url_list, split = '\r\n')[[1]] | |
url_list <- url_list[grep(table, url_list)] |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R): state centroids with sf ---
# Assumes `states` is an sf object defined earlier (not visible in this view).
# Reprojects to EPSG:5070 (CONUS Albers equal-area), computes per-state
# centroids with their XY coordinates, then the single centroid of the unioned
# conterminous US. Trailing "| |" tokens are page-scrape artifacts, not R code.
#States need to be in equal area projection | |
#5070 is EPSG code for the USGS Alber's projection | |
states <- st_transform(states, crs = 5070) | |
#Create centroids of states | |
cntr <- st_centroid(states) | |
#Bind these coordinates to the centroid feature | |
cntr <- cbind(cntr, st_coordinates(cntr)) | |
#Get centroid for conterminous US | |
xy <- states %>% st_union() %>% | |
st_centroid() %>% st_coordinates() |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): delineate a watershed via USGS StreamStats ---
# Builds the watershed.geojson service URL from a state code and a lon/lat in
# EPSG:4326, fetches the response with jsonlite, and serializes the second
# feature collection entry back to a GeoJSON string. The rest of the function
# (including its closing brace, and how the `sf` flag is used) is cut off in
# this view. Trailing "| |" tokens are page-scrape artifacts, not R code.
library(jsonlite);library(sf);library(sp);library(geojsonio) | |
watershed = function(state, lon, lat, sf=TRUE){ | |
p1 = 'https://streamstats.usgs.gov/streamstatsservices/watershed.geojson?rcode=' | |
p2 = '&xlocation=' | |
p3 = '&ylocation=' | |
p4 = '&crs=4326&includeparameters=false&includeflowtypes=false&includefeatures=true&simplify=true' | |
query <- paste0(p1, state, p2, toString(lon), p3, toString(lat), p4) | |
mydata <- fromJSON(query, simplifyVector = FALSE, simplifyDataFrame = FALSE) | |
poly_geojsonsting <- toJSON(mydata$featurecollection[[2]]$feature, auto_unbox = TRUE) |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): load stream gages as sp points (buffer exercise) ---
# Reads a gage CSV, promotes it to a SpatialPointsDataFrame using the LON/LAT
# columns, and stamps it with the CRS of `pts` (defined outside this view).
# The comments describe the intended next steps (gBuffer 50 km around Portland,
# then `over`), which are cut off here. Trailing "| |" tokens are page-scrape
# artifacts, not R code.
#Read in gages data and convert to spatial points data frame | |
#Give it the **pts** CRS and reproject to **pts2** CRS | |
#Select out Portland and use `gBuffer` from `rgeos` package with width = 50,000 meters. | |
#Use `over` function from `sp` package to identify overlapping points with 50 km buffer. | |
library(sp); library(rgeos) | |
gages <- read.csv('./data/StreamGages.csv') | |
gages <- SpatialPointsDataFrame(gages[c('LON_SITE','LAT_SITE')], gages) | |
gages@proj4string <- pts@proj4string |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): load stream gages as sp points (distance exercise) ---
# Identical setup to the buffer snippet above; here the comments describe the
# intended `gDistance` variant (Portland as x, gages as y) whose code is cut
# off in this view. Depends on `pts` defined outside this view for the CRS.
# Trailing "| |" tokens are page-scrape artifacts, not R code.
#Read in gages data and convert to spatial points data frame | |
#Give it the **pts** CRS and reproject to **pts2** CRS | |
#Select out Portland and use `gDistance` from `rgeos` package with portand as x and gages as y in the function. | |
#Sum across TRUE/FALSE values in query. R will count TRUE == 1 and FALSE == 0. | |
library(sp); library(rgeos) | |
gages <- read.csv('./data/StreamGages.csv') | |
gages <- SpatialPointsDataFrame(gages[c('LON_SITE','LAT_SITE')], gages) | |
gages@proj4string <- pts@proj4string |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (Python 2, truncated): walk NHDPlus PlusFlow upstream ---
# Loads the NHDPlus flowline and PlusFlow tables, then recursively collects the
# FROMCOMIDs upstream of a given COMID. The tail of recurs() is cut off in this
# view, so it cannot be repaired here; bugs are flagged inline instead.
# Trailing "| |" tokens are page-scrape artifacts, not Python code.
from StreamCat_functions import dbf2DF | |
pre = 'D:/NHDPlusV21/NHDPlusGL/NHDPlus04' | |
# NOTE(review): stray 'D' before %s — this formats to 'DD:/NHDPlusV21/...';
# should be '%s/NHDSnapshot/Hydrography/NHDFlowline.dbf' % pre.
fline = dbf2DF('D%s/NHDSnapshot/Hydrography/NHDFlowline.dbf' % pre) | |
# NOTE(review): missing '%' operator between the format string and pre —
# this line is a SyntaxError as written.
flow = dbf2DF('%s/NHDPlusAttributes/PlusFlow.dbf' pre)[['TOCOMID','FROMCOMID']] | |
def recurs(val, ups): | |
# NOTE(review): 'print val' is Python-2 only; DataFrame.ix below is long
# deprecated — .loc is the modern equivalent.
print val | |
ups = ups + flow.ix[flow.TOCOMID == val].FROMCOMID.tolist() | |
if 0 in ups: |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (R, truncated): stack per-region StreamCat CSVs into one table ---
# For a metric name `x` and directory `wd`, reads '<x>_Region<rgn>.csv' for each
# NHDPlus hydro-region code and accumulates them (first file seeds outDF, later
# files land in tmpDF). The rbind/cleanup tail of the loop and the function's
# closing brace are cut off in this view. Trailing "| |" tokens are page-scrape
# artifacts, not R code.
#Code loops through StreamCat files on drive and combines into long table | |
#Also removes ancillary columns | |
combine_streamcat = function(x, wd){ | |
hydro.rgns <- c("01","02","03S","03N","03W","04","05","06","07","08","09","10L","10U","11","12","13","14","15","16","17","18") | |
for(i in 1:length(hydro.rgns)){ | |
print(hydro.rgns[i]) | |
if(i == 1){ | |
outDF = read.csv(paste0(wd, x, '_Region', hydro.rgns[i], '.csv')) | |
}else{ | |
tmpDF = read.csv(paste0(wd, x, '_Region', hydro.rgns[i], '.csv')) |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Snippet (Python): tidy a StreamCat NLCD2011 table ---
# Reads NLCD2011_FINAL.csv from the working directory, keeps the first column
# (presumably the COMID identifier — confirm against the file) plus columns 24
# onward, and strips the 'Ws' (watershed-level) token from the remaining column
# names. Fixed here: removed the ' | |' page-scrape artifacts that made the
# original snippet a SyntaxError.
import numpy as np
import pandas as pd

# Read in .csv file
nlcd = pd.read_csv('NLCD2011_FINAL.csv')

# Select desired columns — np.r_ concatenates the slice [:1] (1st column) with
# [23:] (23rd column to end) into one positional index for iloc.
nlcd = nlcd.iloc[:, np.r_[:1, 23:len(nlcd.columns)]]

# Strip out 'Ws' string from column names that contain it
newnames = [w.replace('Ws', '') for w in nlcd.columns]

# Rename columns
nlcd.columns = newnames
NewerOlder