Commit

initial commit
tanushree04 committed Jan 9, 2024
1 parent 8796ba7 commit 840f08a
Showing 3 changed files with 74 additions and 0 deletions.
26 changes: 26 additions & 0 deletions tests/models/test_models.py
@@ -0,0 +1,26 @@
from workflow.lib.inputs import Inputs
from workflow.lib.models import Models
import os
import shutil


def test_model_downloads():
    # Construct the inputs directly rather than calling __init__ on the class by hand
    inputs = Inputs('2023', 'comstock_amy2018_release_2', '18')

    building_ids = ['bldg0000001', 'bldg0000002']
    folder_name = 'saved'
    shutil.rmtree(folder_name, ignore_errors=True)  # Start from a clean output folder

    Models.download_models(building_ids, inputs, folder_name)

    save_folder_1 = os.path.join(folder_name, building_ids[0])
    save_folder_2 = os.path.join(folder_name, building_ids[1])

    assert os.path.exists(folder_name)
    assert os.path.exists(save_folder_1)
    assert os.path.exists(save_folder_2)
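
Since download_models writes each extracted model as <building_id>.osm inside its per-building folder, the test could go one step further and assert on the extracted files themselves. A minimal sketch, reusing the variables defined above:

    # Hypothetical extra checks: verify the extracted .osm files exist
    assert os.path.isfile(os.path.join(save_folder_1, building_ids[0] + '.osm'))
    assert os.path.isfile(os.path.join(save_folder_2, building_ids[1] + '.osm'))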

10 changes: 10 additions & 0 deletions workflow/lib/inputs.py
@@ -0,0 +1,10 @@
class Inputs:
    """Class to assign inputs for running buildstock models in Alfalfa."""

    def __init__(self, year, dataset_name, upgrade):
        self.year = year                  # Dataset release year, e.g. '2023'
        self.dataset_name = dataset_name  # e.g. 'comstock_amy2018_release_2'
        self.upgrade = upgrade            # Upgrade id used in the S3 path, e.g. '18'
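
Inputs only stores these three fields; Models.download_models below interpolates them into the public OEDI S3 path. A minimal construction sketch, using the same example values as the test above:

    inputs = Inputs('2023', 'comstock_amy2018_release_2', '18')
    # inputs.year, inputs.dataset_name and inputs.upgrade end up in a URL of the form
    # .../end-use-load-profiles-for-us-building-stock/2023/comstock_amy2018_release_2/building_energy_models/upgrade=18/<building_id>-up18.osm.gz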
38 changes: 38 additions & 0 deletions workflow/lib/models.py
@@ -0,0 +1,38 @@
import requests
import gzip
import os
from io import BytesIO


class Models:
    """Class to download models from defined inputs."""

    @staticmethod
    def download_models(building_ids, inputs, folder_path):
        """Download and extract the gzipped OSM model for each building id."""
        os.makedirs(folder_path, exist_ok=True)  # Create the target folder if it doesn't exist

        for building_id in building_ids:
            # TODO: check whether this URL differs for other years / ResStock datasets
            osm_url = (
                'https://oedi-data-lake.s3.amazonaws.com/nrel-pds-building-stock/'
                'end-use-load-profiles-for-us-building-stock/'
                f'{inputs.year}/{inputs.dataset_name}/building_energy_models/'
                f'upgrade={inputs.upgrade}/{building_id}-up{inputs.upgrade}.osm.gz'
            )
            response = requests.get(osm_url, stream=True)

            save_folder = os.path.join(folder_path, building_id)
            os.makedirs(save_folder, exist_ok=True)  # One subfolder per building id
            osm_path = os.path.join(save_folder, building_id + '.osm')

            if response.status_code == 200:
                # Decompress the gzipped response in memory and write the plain .osm file
                with gzip.GzipFile(fileobj=BytesIO(response.content), mode='rb') as gz:
                    extracted_content = gz.read()
                with open(osm_path, 'wb') as extracted_file:
                    extracted_file.write(extracted_content)

                print(f"File downloaded and extracted to {osm_path}")
            else:
                print(f"Failed to download the file: {response.status_code}")
