# -*- coding: utf-8 -*-
"""
Created on Thu Feb 06 09:48:07 2014
@author: Ruben Baetens
"""
import os
import numpy as np
import stats

def get_clusters(employment, **kwargs):
    '''
    Find the clusters for weekdays, Saturday and Sunday for a household member
    of the given employment type based on the crosstables given at
    http://homepages.vub.ac.be/~daerts/Occupancy.html
    '''
    # first go to the correct location
    cdir = os.getcwd()
    PATH = '../Data/Aerts_Occupancy/Crosstables/'
    os.chdir(PATH)
    # create an empty dictionary
    keys = ['wkdy', 'sat', 'son']
    cluDict = dict()
    ##########################################################################
    # we find the cluster for each of the day types for the given employment
    # in the 'Crosstable_Employment_*.txt' files
    for key in keys:
        order = ['U12','FTE','PTE','Unemployed','Retired','School']
        emp_i = order.index(employment)
        data = np.loadtxt('Crosstable_Employment_'+key+'.txt', float).T[emp_i]
        rnd = np.random.random()
        cluster = stats.get_probability(rnd, data, p_type='prob')
        cluDict.update({key:cluster})
    ##########################################################################
    # and return the final cluster ids
    os.chdir(cdir)
    return cluDict
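
# Usage sketch (illustrative; assumes the Aerts crosstable files are available
# under '../Data/Aerts_Occupancy/Crosstables/' relative to the working
# directory):
#
#   clusters = get_clusters('FTE')
#   # -> dict with one cluster id per day type, keyed 'wkdy', 'sat' and 'son';
#   #    the ids themselves depend on the random draw.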


def get_occDict(cluster, **kwargs):
    '''
    Create the dictionary with occupancy data based on the files retrieved from
    Aerts et al. as given at http://homepages.vub.ac.be/~daerts/Occupancy.html
    and stored in 'StROBe/Data/Aerts_Occupancy'.
    '''
    # first go to the correct location
    cdir = os.getcwd()
    DATA_PATH = '../Data/Aerts_Occupancy'
    PATH = DATA_PATH + '/Pattern' + str(cluster)
    os.chdir(PATH)
    # create an empty dictionary
    occDict = dict()
    ##########################################################################
    # first we load the occupancy start states 'ss' from StartStates.txt
    ss = dict()
    data = np.loadtxt('StartStates.txt', float)
    for i in range(len(data)):
        ss.update({str(i+1):data[i]})
    # and add the 'ss' data to the occupancy dictionary
    occDict.update({'ss':ss})
    ##########################################################################
    # Second we load the occupancy transition state probabilities 'os'
    # from TransitionProbability.txt
    data = np.loadtxt('TransitionProbability.txt', float)
    for i in range(3):
        os_i = dict()
        for j in range(48):
            os_i.update({str(j+1):data[i*48+j]})
        # and add the 'os_i' data to the occupancy dictionary
        occDict.update({'os_'+str(i+1):os_i})
    ##########################################################################
    # Third we load the Markov time density 'ol' from DurationProbability.txt
    data = np.loadtxt('DurationProbability.txt', float)
    for i in range(3):
        ol_i = dict()
        for j in range(48):
            ol_i.update({str(j+1):data[i*48+j]})
        # and add the 'ol_i' data to the occupancy dictionary
        occDict.update({'ol_'+str(i+1):ol_i})
    ##########################################################################
    # and return the final occDict
    os.chdir(cdir)
    return occDict
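
# Usage sketch (illustrative; 'cluster' is assumed to be one of the ids
# returned by get_clusters, and the Pattern* folders are assumed to be
# available under '../Data/Aerts_Occupancy'):
#
#   occ = get_occDict(clusters['wkdy'])
#   occ['ss']           # start-state data, keyed by state index as strings
#   occ['os_1']['10']   # transition probabilities for state 1 in time bin 10
#   occ['ol_1']['10']   # duration (Markov time density) data for the same bin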


def get_actDict(cluster, **kwargs):
    '''
    Create the dictionary with activity data based on the files retrieved from
    Aerts et al. as given at http://homepages.vub.ac.be/~daerts/Activity.html
    and stored in 'StROBe/Data/Aerts_Activities'.
    '''
    # first go to the correct location
    cdir = os.getcwd()
    DATA_PATH = '../Data/Aerts_Activities'
    os.chdir(DATA_PATH)
    # create an empty dictionary
    actDict = dict()
    ##########################################################################
    # first we define the dictionary used as a legend for the activity data file
    act = {0:'pc', 1:'food', 2:'vacuum', 3:'iron', 4:'tv', 5:'audio',
           6:'dishes', 7:'washing', 8:'drying', 9:'shower'}
    ##########################################################################
    # Second we load the activity proclivity functions
    # from Pattern*cluster*.txt
    FILNAM = 'Pattern'+str(cluster)+'.txt'
    data = np.loadtxt(FILNAM, float)
    for i in range(10):
        actDict.update({act[i]:data.T[i]})
    ##########################################################################
    # and return the final actDict
    actDict.update({'period':600, 'steps':144})
    os.chdir(cdir)
    return actDict
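
# Usage sketch (illustrative; assumes the activity pattern files are available
# under '../Data/Aerts_Activities' and that a cluster id from get_clusters is
# reused here, which is an assumption of this sketch):
#
#   act = get_actDict(clusters['wkdy'])
#   act['tv']       # proclivity values for television use over the day
#   act['period']   # 600 s time step
#   act['steps']    # 144 steps, i.e. one day at 10-minute resolution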