#!/usr/bin/env python

'''
mda2nx
======

Convert MDA file to NeXus
'''
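
# Typical use: pass one or more MDA file names on the command line; each is
# converted to a sibling NeXus/HDF5 (.h5) file by process() below.  (The
# __main__ block at the end also appends a few hard-coded sample files for
# development testing.)
#
#     python mda2nx.py 7idc_0040.mda 2iddf_0012.mda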


########### SVN repository information ###################
# $Date: 2013-02-26 06:27:06 +0000 (Tue, 26 Feb 2013) $
# $Author: jemian $
# $Revision: 1230 $
# $URL$
# $Id: mda2nx.py 1230 2013-02-26 06:27:06Z jemian $
########### SVN repository information ###################


import mda
import datetime
import h5py
import os
import sys
import nxh5_lib


def process(mdaFile):
    if os.path.exists(mdaFile):
        nxFile = os.path.splitext(mdaFile)[0] + os.path.extsep + 'h5'
        data = mda.readMDA(mdaFile)
        scan_number = data[0]['scan_number']
        rank = data[0]['rank']

        f = nxh5_lib.makeFile(nxFile, file_name=nxFile,
                              file_time=str(datetime.datetime.now()),
                              creator="mda2nx.py",
                              HDF5_Version=h5py.version.hdf5_version,
                              h5py_version=h5py.version.version)
        #
        nxentry = nxh5_lib.makeGroup(f, 'scan_%04d' % scan_number, "NXentry")
        nxh5_lib.makeDataset(nxentry, 'scan_number', data=scan_number)
        nxh5_lib.makeDataset(nxentry, 'scan_rank', data=rank)
        nxh5_lib.makeDataset(nxentry, 'original_filename', data=data[0]['filename'])

        nxdata = nxh5_lib.makeGroup(nxentry, 'data', "NXdata")

        # TODO: prepare to slice arrays due to the difference between curr_pt and npts
        # (dimensions acquired v. planned)

        signal = []
        axes = []
        if rank > 0:
            nxh5_lib.makeDataset(nxentry, 'date_time', data=data[1].time)
            for order in range(rank):
                dim = data[order+1]
                nxcoll = nxh5_lib.makeGroup(nxentry, 'dim'+str(order+1), "NXcollection")
                default_pos = None
                for item in dim.p:
                    ds = nxh5_lib.makeDataset(nxcoll,
                                              makeSafeHdf5Name(item.fieldName),
                                              sscan_part='positioner',
                                              data=item.data,
                                              units=item.unit,
                                              number=item.number,
                                              long_name=item.desc,
                                              readback_name=item.readback_name,
                                              readback_desc=item.readback_desc,
                                              readback_unit=item.readback_unit,
                                              step_mode=item.step_mode,
                                              EPICS_PV=item.name)
                    if default_pos is None:
                        # Massively big assumption here: the first positioner found
                        # for each dimension will be used as one of the axes to plot
                        # the first detector found in the highest dimension.
                        # Hopefully, this mostly succeeds or can be changed later.
                        default_pos = item
                        dataset_name = 'p%d' % (order+1)
                        nxh5_lib.makeLink(nxdata, ds, dataset_name)
                        axes.append(dataset_name)
                default_det = None
                for item in dim.d:
                    ds = nxh5_lib.makeDataset(nxcoll,
                                              makeSafeHdf5Name(item.fieldName),
                                              sscan_part='detector',
                                              data=item.data,
                                              units=item.unit,
                                              long_name=item.desc,
                                              EPICS_PV=item.name)
                    if default_det is None:
                        default_det = item
                        dataset_name = 'd%d' % (order+1)
                        nxh5_lib.makeLink(nxdata, ds, dataset_name)
                        signal = [dataset_name]
                for item in dim.t:
                    nxh5_lib.makeDataset(nxcoll,
                                         makeSafeHdf5Name('T%02d' % item.number),
                                         sscan_part='trigger',
                                         data=item.command,
                                         number=item.number,
                                         EPICS_PV=item.name)
            if len(signal) > 0:
                # describe the default plot only if a detector was found
                # (avoids an IndexError for positioner-only scans)
                nxdata.attrs['signal'] = signal
                nxdata.attrs[signal[0] + '_indices'] = axes

        pvs = epics_pvs(data)
        if len(pvs) > 0:
            nxcollection = nxh5_lib.makeGroup(nxentry, 'EPICS_PVs', "NXcollection")
            for pv, v in pvs.items():
                nxh5_lib.makeDataset(nxcollection,
                                     makeSafeHdf5Name(pv),
                                     data=v['value'],
                                     units=v['units'],
                                     long_name=v['description'],
                                     EPICS_type=v['EPICS_type'],
                                     EPICS_PV=pv)

        f.close()
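
# Rough sketch of the NeXus/HDF5 layout written by process() above, assuming a
# 2-D scan with at least one positioner and one detector per dimension (group
# and field names vary with the scan; most dataset attributes omitted):
#
#   /scan_0040                : NXentry
#       scan_number, scan_rank, original_filename, date_time
#       data                  : NXdata        (links p1, p2, d1, d2; 'signal' and '..._indices' attributes)
#       dim1                  : NXcollection  (positioner/detector/trigger fields of the outer loop)
#       dim2                  : NXcollection  (positioner/detector/trigger fields of the inner loop)
#       EPICS_PVs             : NXcollection  (extra PVs saved in the MDA header, if any)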


def makeSafeHdf5Name(proposed):
    '''return a name that is safe to use as a NeXus HDF5 object'''
    # Note that a safe NeXus object name starts with a letter (upper or lower case)
    # or "_" (underscore), then letters, numbers, and "_" and is limited to
    # no more than 63 characters (imposed by the HDF5 rules for names).
    safe = ''
    for c in proposed:
        if c.isalnum() or c == '_':
            if len(safe) == 0 and c.isdigit():
                safe = '_'
            safe += c
        else:
            safe += '_'
    if safe.startswith('NX'):
        safe = '_' + safe
    return safe
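
# Illustrative examples (hypothetical EPICS-style names) of the renaming above:
#   makeSafeHdf5Name('7idc:scan1.P1')  -->  '_7idc_scan1_P1'   (leading digit guarded, punctuation replaced)
#   makeSafeHdf5Name('NXdata')         -->  '_NXdata'          (avoid the reserved "NX" class prefix)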


def epics_pvs(data):
    '''return a dict of the extra EPICS PVs recorded in the MDA file header (data[0])'''
    pvs = {}
    for pv in data[0].keys():
        if pv not in data[0]['ourKeys']:
            desc, unit, value, eType, count = data[0][pv]
            epics_type = mda.EPICS_types(eType)
            pvs[pv] = {
                'description': desc,
                'units': unit,
                'value': value,
                'EPICS_type': epics_type,
                'count': count,
            }
    return pvs
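
# The mapping returned by epics_pvs() is keyed by PV name; process() expects each
# entry to carry these keys (the PV name and values shown here are hypothetical):
#
#   pvs['7idc:sample_temp'] = {'description': 'sample temperature',
#                              'units': 'K',
#                              'value': 295.0,
#                              'EPICS_type': epics_type,   # label from mda.EPICS_types(eType)
#                              'count': 1}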


def fix7idFile(nxFileName):
    '''identify components for NXdata (now handled in process() above)'''
    f = h5py.File(nxFileName, "a")
    make7idNxExample(f)
    f.close()


def make7idNxExample(f):
    '''cherry-pick an image dataset for an example'''
    dim2_p1 = f['/scan_0040/dim2/P1']
    dim3_p1 = f['/scan_0040/dim3/P1']
    dim3_d09 = f['/scan_0040/dim3/D09']
    nxf = nxh5_lib.makeFile("example_7id.h5",
                            file_name="example_7id.h5",
                            file_time=f.attrs.get('file_time'),
                            creator=f.attrs.get('creator'),
                            HDF5_Version=h5py.version.hdf5_version,
                            h5py_version=h5py.version.version,
                            # read the stored scalar values, not the h5py Dataset objects
                            original_filename=f['/scan_0040/original_filename'][()],
                            original_datetime=f['/scan_0040/date_time'][()],
                            )
    nxentry = nxh5_lib.makeGroup(nxf, 'entry', "NXentry")
    default_data = None
    for series in range(len(dim2_p1)):
        data_name = 'data%d' % (series+1)
        nxdata = nxh5_lib.makeGroup(nxentry,
                                    data_name,
                                    "NXdata",
                                    signal="image",
                                    axes=['x', 'y'],
                                    x_indices=[1],
                                    y_indices=[1, 2],
                                    )
        if default_data is None:
            # default_data = str(nxdata.name)    # absolute path
            default_data = data_name             # relative to this group
            nxentry.attrs['default_NXdata'] = default_data
        nxh5_lib.makeDataset(nxdata,
                             makeSafeHdf5Name('image'),
                             data=dim3_d09[series],
                             units=dim3_d09.attrs.get('units'),
                             long_name=dim3_d09.attrs.get('long_name'),
                             signal=1,
                             )
        nxh5_lib.makeDataset(nxdata,
                             makeSafeHdf5Name('x'),
                             data=dim2_p1[series],
                             units=dim2_p1.attrs.get('units'),
                             long_name=dim2_p1.attrs.get('long_name'),
                             )
        nxh5_lib.makeDataset(nxdata,
                             makeSafeHdf5Name('y'),
                             data=dim3_p1[series],
                             units=dim3_p1.attrs.get('units'),
                             long_name=dim3_p1.attrs.get('long_name'),
                             )
    comment = '''
These datasets came from data collected at the APS using the EPICS sscan record.
The <x> dataset represents the positioner values for each row.
The <y> dataset represents the positioner values for each column, but the actual values change with each row.
The <image> dataset should be plotted against the <x> and <y> values such that image( x[row], y[row][col] ).
'''.strip()
    nxnote = nxh5_lib.makeGroup(nxentry, 'comment', "NXnote")
    nxh5_lib.makeDataset(nxnote, makeSafeHdf5Name('comment'), data=comment)
    nxf.close()
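
# make7idNxExample() writes example_7id.h5 with one NXdata group per point of
# the outermost scan dimension: /entry/data1 ... /entry/dataN, each holding
# 'image', 'x', and 'y' datasets cherry-picked from /scan_0040 of the converted
# 7ID file, plus an NXnote describing how to plot them.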


def main(mdaFileList):
    '''do the work'''
    for item in mdaFileList:
        process(item)


if __name__ == '__main__':
    # developer testing: append a few sample MDA files to the command-line arguments
    filename = os.path.join('..', 'data', 'mda', '7idc_0040.mda')
    sys.argv.append(filename)
    sys.argv.append(os.path.join('..', 'data', 'mda', '2iddf_0012.mda'))
    sys.argv.append(os.path.join('..', 'data', 'mda', '2iddf_0001.mda'))
    main(sys.argv[1:])

    # fix items in 7ID file
    fix7idFile(os.path.join('..', 'data', 'mda', '7idc_0040.h5'))