#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*GSASIIpwd: Powder calculations module*
==============================================

This version has been modified to provide Laue Fringe fitting.

'''
########### SVN repository information ###################
# $Date: 2023-03-25 16:11:14 +0000 (Sat, 25 Mar 2023) $
# $Author: toby $
# $Revision: 5523 $
# $URL: trunk/GSASIIpwd.py $
# $Id: GSASIIpwd.py 5523 2023-03-25 16:11:14Z toby $
########### SVN repository information ###################
from __future__ import division, print_function
import sys
import math
import time
import os
import os.path
import subprocess as subp
import datetime as dt
import copy

import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
import random as rand
import numpy.fft as fft
import scipy.interpolate as si
import scipy.stats as st
import scipy.optimize as so
import scipy.special as sp
import scipy.signal as signal

import GSASIIpath
filversion = "$Revision: 5523 $"
GSASIIpath.SetVersionNumber("$Revision: 5523 $")
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIElem as G2elem
import GSASIImath as G2mth
try:
    import pypowder as pyd
except ImportError:
    print ('pypowder is not available - profile calcs. not allowed')
try:
    import pydiffax as pyx
except ImportError:
    print ('pydiffax is not available for this platform')
import GSASIIfiles as G2fil

# trig functions in degrees
tand = lambda x: math.tan(x*math.pi/180.)
atand = lambda x: 180.*math.atan(x)/math.pi
atan2d = lambda y,x: 180.*math.atan2(y,x)/math.pi
cosd = lambda x: math.cos(x*math.pi/180.)
acosd = lambda x: 180.*math.acos(x)/math.pi
rdsq2d = lambda x,p: round(1.0/math.sqrt(x),p)
#numpy versions
npsind = lambda x: np.sin(x*np.pi/180.)
npasind = lambda x: 180.*np.arcsin(x)/math.pi
npcosd = lambda x: np.cos(x*math.pi/180.)
npacosd = lambda x: 180.*np.arccos(x)/math.pi
nptand = lambda x: np.tan(x*math.pi/180.)
npatand = lambda x: 180.*np.arctan(x)/np.pi
npatan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
npT2stl = lambda tth, wave: 2.0*npsind(tth/2.0)/wave    #=d*
npT2q = lambda tth,wave: 2.0*np.pi*npT2stl(tth,wave)    #=2pi*d*
npq2T = lambda Q,wave: 2.0*npasind(0.25*Q*wave/np.pi)
ateln2 = 8.0*math.log(2.0)
sateln2 = np.sqrt(ateln2)
nxs = np.newaxis
is_exe = lambda fpath: os.path.isfile(fpath) and os.access(fpath, os.X_OK)

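# Illustrative sketch (not part of the original module): a round-trip check of the
# 2-theta <-> Q conversion lambdas defined above. The wavelength is an assumed
# Cu K-alpha value, used only for demonstration.
def _example_q_conversion():
    '''Illustrative only: convert 30 deg 2-theta to Q and back.'''
    wave = 1.5406                       # assumed X-ray wavelength in Angstroms
    Q = npT2q(30.0,wave)                # Q = 4*pi*sin(theta)/lambda
    tth = npq2T(Q,wave)                 # should recover ~30.0 deg
    return Q,tth
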
---|
#### Powder utilities ################################################################################
def PhaseWtSum(G2frame,histo):
    '''
    Calculate sum of phase mass*phase fraction for PWDR data (exclude magnetic phases)

    :param G2frame: GSASII main frame structure
    :param str histo: histogram name
    :returns: sum(scale*mass) for phases in histo
    '''
    Histograms,Phases = G2frame.GetUsedHistogramsAndPhasesfromTree()
    wtSum = 0.0
    for phase in Phases:
        if Phases[phase]['General']['Type'] != 'magnetic':
            if histo in Phases[phase]['Histograms']:
                if not Phases[phase]['Histograms'][histo]['Use']: continue
                mass = Phases[phase]['General']['Mass']
                phFr = Phases[phase]['Histograms'][histo]['Scale'][0]
                wtSum += mass*phFr
    return wtSum

#### GSASII pwdr & pdf calculation routines ################################################################################
def Transmission(Geometry,Abs,Diam):
    '''
    Calculate sample transmission

    :param str Geometry: one of 'Cylinder','Bragg-Brentano','Tilting flat plate in transmission','Fixed flat plate'
    :param float Abs: absorption coeff in cm-1
    :param float Diam: sample thickness/diameter in mm
    '''
    if 'Cylinder' in Geometry:      #Lobanov & Alte da Veiga for 2-theta = 0; beam fully illuminates sample
        MuR = Abs*Diam/20.0
        if MuR <= 3.0:
            T0 = 16/(3.*math.pi)
            T1 = -0.045780
            T2 = -0.02489
            T3 = 0.003045
            T = -T0*MuR-T1*MuR**2-T2*MuR**3-T3*MuR**4
            if T < -20.:
                return 2.06e-9
            else:
                return math.exp(T)
        else:
            T1 = 1.433902
            T2 = 0.013869+0.337894
            T3 = 1.933433+1.163198
            T4 = 0.044365-0.04259
            T = (T1-T4)/(1.0+T2*(MuR-3.0))**T3+T4
            return T/100.
    elif 'plate' in Geometry:
        MuR = Abs*Diam/10.
        return math.exp(-MuR)
    elif 'Bragg' in Geometry:
        return 0.0

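# Illustrative sketch (not part of the original module): estimate the transmission of a
# 1.0 mm capillary with an assumed linear absorption coefficient of 5 cm-1.
def _example_transmission():
    '''Illustrative only; the geometry string and numbers are assumptions.'''
    return Transmission('Cylinder',5.0,1.0)     # muR = 5*1/20 = 0.25, well inside the muR <= 3 branch
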
---|
def SurfaceRough(SRA,SRB,Tth):
    ''' Suortti (J. Appl. Cryst. 5, 325-331, 1972) surface roughness correction

    :param float SRA: Suortti surface roughness parameter
    :param float SRB: Suortti surface roughness parameter
    :param float Tth: 2-theta(deg) - can be numpy array
    '''
    sth = npsind(Tth/2.)
    T1 = np.exp(-SRB/sth)
    T2 = SRA+(1.-SRA)*np.exp(-SRB)
    return (SRA+(1.-SRA)*T1)/T2

def SurfaceRoughDerv(SRA,SRB,Tth):
    ''' Suortti surface roughness correction derivatives

    :param float SRA: Suortti surface roughness parameter (dimensionless)
    :param float SRB: Suortti surface roughness parameter (dimensionless)
    :param float Tth: 2-theta(deg) - can be numpy array
    :return list: [dydSRA,dydSRB] derivatives to be used for intensity derivative
    '''
    sth = npsind(Tth/2.)
    T1 = np.exp(-SRB/sth)
    T2 = SRA+(1.-SRA)*np.exp(-SRB)
    Trans = (SRA+(1.-SRA)*T1)/T2
    dydSRA = ((1.-T1)*T2-(1.-np.exp(-SRB))*Trans)/T2**2
    dydSRB = ((SRA-1.)*T1*T2/sth-Trans*(SRA-T2))/T2**2
    return [dydSRA,dydSRB]

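# Illustrative sketch (not part of the original module): compare the analytic
# SurfaceRoughDerv result for SRA against a central finite difference of SurfaceRough.
# The parameter values and step size are assumptions.
def _example_surface_rough_check(SRA=0.9,SRB=0.5,Tth=30.0,delt=1.e-5):
    '''Illustrative only; returns (analytic dT/dSRA, numerical dT/dSRA).'''
    dAnalytic = SurfaceRoughDerv(SRA,SRB,Tth)[0]
    dNumeric = (SurfaceRough(SRA+delt,SRB,Tth)-SurfaceRough(SRA-delt,SRB,Tth))/(2.*delt)
    return dAnalytic,dNumeric
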
---|
def Absorb(Geometry,MuR,Tth,Phi=0,Psi=0):
    '''Calculate sample absorption

    :param str Geometry: one of 'Cylinder','Bragg-Brentano','Tilting Flat Plate in transmission','Fixed flat plate'
    :param float MuR: absorption coeff * sample thickness/2 or radius
    :param Tth: 2-theta scattering angle - can be numpy array
    :param float Phi: flat plate tilt angle - future
    :param float Psi: flat plate tilt axis - future
    '''

    def muRunder3(MuR,Sth2):
        T0 = 16.0/(3.*np.pi)
        T1 = (25.99978-0.01911*Sth2**0.25)*np.exp(-0.024551*Sth2)+ \
            0.109561*np.sqrt(Sth2)-26.04556
        T2 = -0.02489-0.39499*Sth2+1.219077*Sth2**1.5- \
            1.31268*Sth2**2+0.871081*Sth2**2.5-0.2327*Sth2**3
        T3 = 0.003045+0.018167*Sth2-0.03305*Sth2**2
        Trns = -T0*MuR-T1*MuR**2-T2*MuR**3-T3*MuR**4
        return np.exp(Trns)

    def muRover3(MuR,Sth2):
        T1 = 1.433902+11.07504*Sth2-8.77629*Sth2*Sth2+ \
            10.02088*Sth2**3-3.36778*Sth2**4
        T2 = (0.013869-0.01249*Sth2)*np.exp(3.27094*Sth2)+ \
            (0.337894+13.77317*Sth2)/(1.0+11.53544*Sth2)**1.555039
        T3 = 1.933433/(1.0+23.12967*Sth2)**1.686715- \
            0.13576*np.sqrt(Sth2)+1.163198
        T4 = 0.044365-0.04259/(1.0+0.41051*Sth2)**148.4202
        Trns = (T1-T4)/(1.0+T2*(MuR-3.0))**T3+T4
        return Trns/100.

    Sth2 = npsind(Tth/2.0)**2
    if 'Cylinder' in Geometry:      #Lobanov & Alte da Veiga for 2-theta = 0; beam fully illuminates sample
        if 'array' in str(type(MuR)):
            MuRSTh2 = np.vstack((MuR,Sth2))
            AbsCr = np.where(MuRSTh2[0]<=3.0,muRunder3(MuRSTh2[0],MuRSTh2[1]),muRover3(MuRSTh2[0],MuRSTh2[1]))
            return AbsCr
        else:
            if MuR <= 3.0:
                return muRunder3(MuR,Sth2)
            else:
                return muRover3(MuR,Sth2)
    elif 'Bragg' in Geometry:
        return 1.0
    elif 'Fixed' in Geometry: #assumes sample plane is perpendicular to incident beam
        # and only defined for 2theta < 90
        MuT = 2.*MuR
        T1 = np.exp(-MuT)
        T2 = np.exp(-MuT/npcosd(Tth))
        Tb = MuT-MuT/npcosd(Tth)
        return (T2-T1)/Tb
    elif 'Tilting' in Geometry: #assumes symmetric tilt so sample plane is parallel to diffraction vector
        MuT = 2.*MuR
        cth = npcosd(Tth/2.0)
        return np.exp(-MuT/cth)/cth

def AbsorbDerv(Geometry,MuR,Tth,Phi=0,Psi=0):
    '''Compute the derivative of the sample absorption correction with respect to MuR
    by numerical differences (central, or forward when MuR is zero).
    See :func:`Absorb` for parameter definitions.
    '''
    dA = 0.001
    AbsP = Absorb(Geometry,MuR+dA,Tth,Phi,Psi)
    if MuR:
        AbsM = Absorb(Geometry,MuR-dA,Tth,Phi,Psi)
        return (AbsP-AbsM)/(2.0*dA)
    else:
        return (AbsP-1.)/dA

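# Illustrative sketch (not part of the original module): evaluate the cylindrical
# absorption correction and its muR derivative over a short 2-theta range; the
# muR value and 2-theta grid are assumptions.
def _example_absorb(MuR=0.5):
    '''Illustrative only; returns (absorption correction, d(correction)/d(muR)).'''
    Tth = np.linspace(5.,90.,5)
    return Absorb('Cylinder',MuR,Tth),AbsorbDerv('Cylinder',MuR,Tth)
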
---|
def Polarization(Pola,Tth,Azm=0.0):
    """ Calculate angle dependent x-ray polarization correction (not scaled correctly!)

    :param Pola: polarization coefficient e.g 1.0 fully polarized, 0.5 unpolarized
    :param Azm: azimuthal angle e.g. 0.0 in plane of polarization - can be numpy array
    :param Tth: 2-theta scattering angle - can be numpy array
      which (if either) of these is "right"?
    :return: (pola, dpdPola) - both 2-d arrays
      * pola = ((1-Pola)*npcosd(Azm)**2+Pola*npsind(Azm)**2)*npcosd(Tth)**2+ \
        (1-Pola)*npsind(Azm)**2+Pola*npcosd(Azm)**2
      * dpdPola: derivative needed for least squares

    """
    cazm = npcosd(Azm)**2
    sazm = npsind(Azm)**2
    pola = ((1.0-Pola)*cazm+Pola*sazm)*npcosd(Tth)**2+(1.0-Pola)*sazm+Pola*cazm
    dpdPola = -npsind(Tth)**2*(sazm-cazm)
    return pola,dpdPola

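# Illustrative sketch (not part of the original module): polarization correction for an
# unpolarized beam (Pola = 0.5) in the plane of polarization (Azm = 0), which reduces to
# the familiar (1 + cos^2(2theta))/2 behaviour.
def _example_polarization():
    '''Illustrative only; the 2-theta values are assumptions.'''
    Tth = np.array([20.,60.,120.])
    pola,dpdPola = Polarization(0.5,Tth,Azm=0.0)
    return pola,dpdPola
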
---|
def Oblique(ObCoeff,Tth):
    'currently assumes detector is normal to beam'
    if ObCoeff:
        K = (1.-ObCoeff)/(1.0-np.exp(np.log(ObCoeff)/npcosd(Tth)))
        return K
    else:
        return 1.0

def Ruland(RulCoff,wave,Q,Compton):
    '''Compute the Ruland width damping factor applied to the Compton scattering
    contribution in x-ray PDF data reduction.

    :param float RulCoff: Ruland width coefficient
    :param float wave: wavelength in Angstroms
    :param Q: array of Q values
    :param Compton: Compton scattering contribution at each Q
    '''
    C = 2.9978e8
    D = 1.5e-3
    hmc = 0.024262734687    #Compton wavelength in A
    sinth2 = (Q*wave/(4.0*np.pi))**2
    dlam = (wave**2)*Compton*Q/C
    dlam_c = 2.0*hmc*sinth2-D*wave**2
    return 1.0/((1.0+dlam/RulCoff)*(1.0+(np.pi*dlam_c/(dlam+RulCoff))**2))

def KleinNishina(wave,Q):
    '''Compute the Klein-Nishina factor for Compton scattering as a function of Q
    for the given wavelength (in Angstroms).
    '''
    hmc = 0.024262734687    #Compton wavelength in A
    TTh = npq2T(Q,wave)
    P = 1./(1.+(1.-npcosd(TTh)*(hmc/wave)))
    KN = (P**3-(P*npsind(TTh))**2+P)/(1.+npcosd(TTh)**2)
    return KN

def LorchWeight(Q):
    '''Compute the Lorch damping window, sin(pi*(Qmax-Q)/(2*Qmax)), for an array of Q
    values ending at Qmax; used to reduce termination ripples in G(r).
    '''
    return np.sin(np.pi*(Q[-1]-Q)/(2.0*Q[-1]))

def GetAsfMean(ElList,Sthl2):
    '''Calculate various scattering factor terms for PDF calcs

    :param dict ElList: element dictionary contains scattering factor coefficients, etc.
    :param np.array Sthl2: numpy array of sin theta/lambda squared values
    :returns: mean(f^2), mean(f)^2, mean(compton)
    '''
    sumNoAtoms = 0.0
    FF = np.zeros_like(Sthl2)
    FF2 = np.zeros_like(Sthl2)
    CF = np.zeros_like(Sthl2)
    for El in ElList:
        sumNoAtoms += ElList[El]['FormulaNo']
    for El in ElList:
        el = ElList[El]
        ff2 = (G2elem.ScatFac(el,Sthl2)+el['fp'])**2+el['fpp']**2
        cf = G2elem.ComptonFac(el,Sthl2)
        FF += np.sqrt(ff2)*el['FormulaNo']/sumNoAtoms
        FF2 += ff2*el['FormulaNo']/sumNoAtoms
        CF += cf*el['FormulaNo']/sumNoAtoms
    return FF2,FF**2,CF

def GetNumDensity(ElList,Vol):
    '''Compute the number density of atoms (atoms per A^3) from the element
    dictionary ('FormulaNo' entries) and the formula volume Vol.
    '''
    sumNoAtoms = 0.0
    for El in ElList:
        sumNoAtoms += ElList[El]['FormulaNo']
    return sumNoAtoms/Vol

---|
def CalcPDF(data,inst,limits,xydata):
    '''Computes I(Q), S(Q) & G(r) from Sample, Bkg, etc. diffraction patterns loaded into
    dict xydata; results are placed in xydata.
    Calculation parameters are found in dicts data and inst and list limits.
    The return value is at present an empty list.
    '''
    auxPlot = []
    if 'T' in inst['Type'][0]:
        Ibeg = 0
        Ifin = len(xydata['Sample'][1][0])
    else:
        Ibeg = np.searchsorted(xydata['Sample'][1][0],limits[0])
        Ifin = np.searchsorted(xydata['Sample'][1][0],limits[1])+1
    #subtract backgrounds - if any & use PWDR limits
    IofQ = copy.deepcopy(xydata['Sample'])
    IofQ[1] = np.array([I[Ibeg:Ifin] for I in IofQ[1]])
    if data['Sample Bkg.']['Name']:
        IofQ[1][1] += xydata['Sample Bkg.'][1][1][Ibeg:Ifin]*data['Sample Bkg.']['Mult']
    if data['Container']['Name']:
        xycontainer = xydata['Container'][1][1]*data['Container']['Mult']
        if data['Container Bkg.']['Name']:
            xycontainer += xydata['Container Bkg.'][1][1][Ibeg:Ifin]*data['Container Bkg.']['Mult']
        IofQ[1][1] += xycontainer[Ibeg:Ifin]
    data['IofQmin'] = IofQ[1][1][-1]
    IofQ[1][1] -= data.get('Flat Bkg',0.)
    #get element data & absorption coeff.
    ElList = data['ElList']
    Tth = IofQ[1][0]    #2-theta or TOF!
    if 'X' in inst['Type'][0]:
        Abs = G2lat.CellAbsorption(ElList,data['Form Vol'])
        #Apply angle dependent corrections
        MuR = Abs*data['Diam']/20.0
        IofQ[1][1] /= Absorb(data['Geometry'],MuR,Tth)
        IofQ[1][1] /= Polarization(inst['Polariz.'][1],Tth,Azm=inst['Azimuth'][1])[0]
        if data['DetType'] == 'Area detector':
            IofQ[1][1] *= Oblique(data['ObliqCoeff'],Tth)
    elif 'T' in inst['Type'][0]:    #neutron TOF normalized data - needs wavelength dependent absorption
        wave = 2.*G2lat.TOF2dsp(inst,IofQ[1][0])*npsind(inst['2-theta'][1]/2.)
        Els = ElList.keys()
        Isotope = {El:'Nat. abund.' for El in Els}
        GD = {'AtomTypes':ElList,'Isotope':Isotope}
        BLtables = G2elem.GetBLtable(GD)
        FP,FPP = G2elem.BlenResTOF(Els,BLtables,wave)
        Abs = np.zeros(len(wave))
        for iel,El in enumerate(Els):
            BL = BLtables[El][1]
            SA = BL['SA']*wave/1.798197+4.0*np.pi*FPP[iel]**2 #+BL['SL'][1]?
            SA *= ElList[El]['FormulaNo']/data['Form Vol']
            Abs += SA
        MuR = Abs*data['Diam']/2.
        IofQ[1][1] /= Absorb(data['Geometry'],MuR,inst['2-theta'][1]*np.ones(len(wave)))
    # improves look of F(Q) but no impact on G(R)
    # bBut,aBut = signal.butter(8,.5,"lowpass")
    # IofQ[1][1] = signal.filtfilt(bBut,aBut,IofQ[1][1])
    XY = IofQ[1]
    #convert to Q
    nQpoints = 5000
    if 'C' in inst['Type'][0]:
        wave = G2mth.getWave(inst)
        minQ = npT2q(Tth[0],wave)
        maxQ = npT2q(Tth[-1],wave)
        Qpoints = np.linspace(0.,maxQ,nQpoints,endpoint=True)
        dq = Qpoints[1]-Qpoints[0]
        XY[0] = npT2q(XY[0],wave)
        Qdata = si.griddata(XY[0],XY[1],Qpoints,method='linear',fill_value=XY[1][0])    #interpolate I(Q)
    elif 'T' in inst['Type'][0]:
        difC = inst['difC'][1]
        minQ = 2.*np.pi*difC/Tth[-1]
        maxQ = 2.*np.pi*difC/Tth[0]
        Qpoints = np.linspace(0.,maxQ,nQpoints,endpoint=True)
        dq = Qpoints[1]-Qpoints[0]
        XY[0] = 2.*np.pi*difC/XY[0]
        Qdata = si.griddata(XY[0],XY[1],Qpoints,method='linear',fill_value=XY[1][-1])    #interpolate I(Q)
    Qdata -= np.min(Qdata)*data['BackRatio']

    qLimits = data['QScaleLim']
    maxQ = np.searchsorted(Qpoints,min(Qpoints[-1],qLimits[1]))+1
    minQ = np.searchsorted(Qpoints,min(qLimits[0],0.90*Qpoints[-1]))
    qLimits = [Qpoints[minQ],Qpoints[maxQ-1]]
    newdata = []
    if len(IofQ) < 3:
        xydata['IofQ'] = [IofQ[0],[Qpoints,Qdata],'']
    else:
        xydata['IofQ'] = [IofQ[0],[Qpoints,Qdata],IofQ[2]]
    for item in xydata['IofQ'][1]:
        newdata.append(item[:maxQ])
    xydata['IofQ'][1] = newdata

    xydata['SofQ'] = copy.deepcopy(xydata['IofQ'])
    if 'XC' in inst['Type'][0]:
        FFSq,SqFF,CF = GetAsfMean(ElList,(xydata['SofQ'][1][0]/(4.0*np.pi))**2)  #these are <f^2>,<f>^2,Cf
    else: #TOF
        CF = np.zeros(len(xydata['SofQ'][1][0]))
        FFSq = np.ones(len(xydata['SofQ'][1][0]))
        SqFF = np.ones(len(xydata['SofQ'][1][0]))
    Q = xydata['SofQ'][1][0]
#    auxPlot.append([Q,np.copy(CF),'CF-unCorr'])
    if 'XC' in inst['Type'][0]:
#        CF *= KleinNishina(wave,Q)
        ruland = Ruland(data['Ruland'],wave,Q,CF)
#        auxPlot.append([Q,ruland,'Ruland'])
        CF *= ruland
#    auxPlot.append([Q,CF,'CF-Corr'])
    scale = np.sum((FFSq+CF)[minQ:maxQ])/np.sum(xydata['SofQ'][1][1][minQ:maxQ])
    xydata['SofQ'][1][1] *= scale
    if 'XC' in inst['Type'][0]:
        xydata['SofQ'][1][1] -= CF
    xydata['SofQ'][1][1] = xydata['SofQ'][1][1]/SqFF
    scale = len(xydata['SofQ'][1][1][minQ:maxQ])/np.sum(xydata['SofQ'][1][1][minQ:maxQ])
    xydata['SofQ'][1][1] *= scale
    xydata['FofQ'] = copy.deepcopy(xydata['SofQ'])
    xydata['FofQ'][1][1] = xydata['FofQ'][1][0]*(xydata['SofQ'][1][1]-1.0)
    if data['Lorch']:
        xydata['FofQ'][1][1] *= LorchWeight(Q)
    xydata['GofR'] = copy.deepcopy(xydata['FofQ'])
    xydata['gofr'] = copy.deepcopy(xydata['FofQ'])
    nR = len(xydata['GofR'][1][1])
    Rmax = GSASIIpath.GetConfigValue('PDF_Rmax',100.)
    mul = int(round(2.*np.pi*nR/(Rmax*qLimits[1])))
#    mul = int(round(2.*np.pi*nR/(data.get('Rmax',100.)*qLimits[1])))
    R = 2.*np.pi*np.linspace(0,nR,nR,endpoint=True)/(mul*qLimits[1])
    xydata['GofR'][1][0] = R
    xydata['gofr'][1][0] = R
    GR = -(2./np.pi)*dq*np.imag(fft.fft(xydata['FofQ'][1][1],mul*nR)[:nR])*data.get('GR Scale',1.0)
#    GR = -dq*np.imag(fft.fft(xydata['FofQ'][1][1],mul*nR)[:nR])*data.get('GR Scale',1.0)
    xydata['GofR'][1][1] = GR
    numbDen = 0.
    if 'ElList' in data:
        numbDen = GetNumDensity(data['ElList'],data['Form Vol'])
    gr = GR/(4.*np.pi*numbDen*R)+1.
#    gr = GR/(np.pi*R)   ##missing number density
    xydata['gofr'][1][1] = gr
    if data.get('noRing',True):
        Rmin = data['Rmin']
        xydata['gofr'][1][1] = np.where(R<Rmin,-4.*numbDen,xydata['gofr'][1][1])
        xydata['GofR'][1][1] = np.where(R<Rmin,-4.*R*np.pi*numbDen,xydata['GofR'][1][1])
    return auxPlot

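# Illustrative sketch (not part of the original module): the core zero-padded sine
# transform used in CalcPDF above to turn F(Q) = Q[S(Q)-1] into G(r), isolated so it can
# be exercised on a synthetic F(Q) array sampled on a uniform Q grid of spacing dq.
def _example_fq_to_gr(FofQ,dq,nR,mul=4):
    '''Illustrative only; the padding factor mul is an assumed value.'''
    GR = -(2./np.pi)*dq*np.imag(fft.fft(FofQ,mul*nR)[:nR])   # same FFT step as CalcPDF
    return GR
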
---|
def PDFPeakFit(peaks,data):
    '''Fit a linear background plus Gaussian peaks to a region of a G(r) curve;
    peak positions, magnitudes and widths are refined as flagged in the peaks dict.
    '''
    rs2pi = 1./np.sqrt(2*np.pi)

    def MakeParms(peaks):
        varyList = []
        parmDict = {'slope':peaks['Background'][1][1]}
        if peaks['Background'][2]:
            varyList.append('slope')
        for i,peak in enumerate(peaks['Peaks']):
            parmDict['PDFpos;'+str(i)] = peak[0]
            parmDict['PDFmag;'+str(i)] = peak[1]
            parmDict['PDFsig;'+str(i)] = peak[2]
            if 'P' in peak[3]:
                varyList.append('PDFpos;'+str(i))
            if 'M' in peak[3]:
                varyList.append('PDFmag;'+str(i))
            if 'S' in peak[3]:
                varyList.append('PDFsig;'+str(i))
        return parmDict,varyList

    def SetParms(peaks,parmDict,varyList):
        if 'slope' in varyList:
            peaks['Background'][1][1] = parmDict['slope']
        for i,peak in enumerate(peaks['Peaks']):
            if 'PDFpos;'+str(i) in varyList:
                peak[0] = parmDict['PDFpos;'+str(i)]
            if 'PDFmag;'+str(i) in varyList:
                peak[1] = parmDict['PDFmag;'+str(i)]
            if 'PDFsig;'+str(i) in varyList:
                peak[2] = parmDict['PDFsig;'+str(i)]

    def CalcPDFpeaks(parmdict,Xdata):
        Z = parmdict['slope']*Xdata
        ipeak = 0
        while True:
            try:
                pos = parmdict['PDFpos;'+str(ipeak)]
                mag = parmdict['PDFmag;'+str(ipeak)]
                wid = parmdict['PDFsig;'+str(ipeak)]
                wid2 = 2.*wid**2
                Z += mag*rs2pi*np.exp(-(Xdata-pos)**2/wid2)/wid
                ipeak += 1
            except KeyError:        #no more peaks to process
                return Z

    def errPDFProfile(values,xdata,ydata,parmdict,varylist):
        parmdict.update(zip(varylist,values))
        M = CalcPDFpeaks(parmdict,xdata)-ydata
        return M

    newpeaks = copy.copy(peaks)
    iBeg = np.searchsorted(data[1][0],newpeaks['Limits'][0])
    iFin = np.searchsorted(data[1][0],newpeaks['Limits'][1])+1
    X = data[1][0][iBeg:iFin]
    Y = data[1][1][iBeg:iFin]
    parmDict,varyList = MakeParms(peaks)
    if not len(varyList):
        G2fil.G2Print (' Nothing varied')
        return newpeaks,None,None,None,None,None

    Rvals = {}
    values = np.array(Dict2Values(parmDict, varyList))
    result = so.leastsq(errPDFProfile,values,full_output=True,ftol=0.0001,
        args=(X,Y,parmDict,varyList))
    chisq = np.sum(result[2]['fvec']**2)
    Values2Dict(parmDict, varyList, result[0])
    SetParms(peaks,parmDict,varyList)
    Rvals['Rwp'] = np.sqrt(chisq/np.sum(Y**2))*100.      #to %
    chisq = np.sum(result[2]['fvec']**2)/(len(X)-len(values))   #reduced chi^2 = M/(Nobs-Nvar)
    sigList = list(np.sqrt(chisq*np.diag(result[1])))
    Z = CalcPDFpeaks(parmDict,X)
    newpeaks['calc'] = [X,Z]
    return newpeaks,result[0],varyList,sigList,parmDict,Rvals

---|
def MakeRDF(RDFcontrols,background,inst,pwddata):
    '''Compute a radial distribution function D(R) from a powder pattern using the
    difference selected in RDFcontrols (obs-calc, obs-back or calc-back).
    '''
    auxPlot = []
    if 'C' in inst['Type'][0] or 'B' in inst['Type'][0]:
        Tth = pwddata[0]
        wave = G2mth.getWave(inst)
        minQ = npT2q(Tth[0],wave)
        maxQ = npT2q(Tth[-1],wave)
        powQ = npT2q(Tth,wave)
    elif 'T' in inst['Type'][0]:
        TOF = pwddata[0]
        difC = inst['difC'][1]
        minQ = 2.*np.pi*difC/TOF[-1]
        maxQ = 2.*np.pi*difC/TOF[0]
        powQ = 2.*np.pi*difC/TOF
    piDQ = np.pi/(maxQ-minQ)
    Qpoints = np.linspace(minQ,maxQ,len(pwddata[0]),endpoint=True)
    if RDFcontrols['UseObsCalc'] == 'obs-calc':
        Qdata = si.griddata(powQ,pwddata[1]-pwddata[3],Qpoints,method=RDFcontrols['Smooth'],fill_value=0.)
    elif RDFcontrols['UseObsCalc'] == 'obs-back':
        Qdata = si.griddata(powQ,pwddata[1]-pwddata[4],Qpoints,method=RDFcontrols['Smooth'],fill_value=pwddata[1][0])
    elif RDFcontrols['UseObsCalc'] == 'calc-back':
        Qdata = si.griddata(powQ,pwddata[3]-pwddata[4],Qpoints,method=RDFcontrols['Smooth'],fill_value=pwddata[1][0])
    Qdata *= np.sin((Qpoints-minQ)*piDQ)/piDQ
    Qdata *= 0.5*np.sqrt(Qpoints)   #Qbin normalization
    dq = Qpoints[1]-Qpoints[0]
    nR = len(Qdata)
    R = 0.5*np.pi*np.linspace(0,nR,nR)/(4.*maxQ)
    iFin = np.searchsorted(R,RDFcontrols['maxR'])+1
    bBut,aBut = signal.butter(4,0.01)
    Qsmooth = signal.filtfilt(bBut,aBut,Qdata)
#    auxPlot.append([Qpoints,Qdata,'interpolate:'+RDFcontrols['Smooth']])
#    auxPlot.append([Qpoints,Qsmooth,'interpolate:'+RDFcontrols['Smooth']])
    DofR = dq*np.imag(fft.fft(Qsmooth,16*nR)[:nR])
    auxPlot.append([R[:iFin],DofR[:iFin],'D(R) for '+RDFcontrols['UseObsCalc']])
    return auxPlot

# PDF optimization =============================================================
def OptimizePDF(data,xydata,limits,inst,showFit=True,maxCycles=25):
    '''Optimize the PDF corrections ('Flat Bkg', 'BackRatio', 'Ruland' or the sample
    background multiplier) by minimizing the deviation of G(r) below Rmin from its
    theoretical -4*pi*rho*r behavior; uses :func:`SetupPDFEval` for the objective.
    '''
    import scipy.optimize as opt
    numbDen = GetNumDensity(data['ElList'],data['Form Vol'])
    Min,Init,Done = SetupPDFEval(data,xydata,limits,inst,numbDen)
    xstart = Init()
    bakMul = data['Sample Bkg.']['Mult']
    if showFit:
        rms = Min(xstart)
        G2fil.G2Print('  Optimizing corrections to improve G(r) at low r')
        if data['Sample Bkg.'].get('Refine',False):
#            data['Flat Bkg'] = 0.
            G2fil.G2Print('  start: Ruland={:.3f}, Sample Bkg mult={:.3f} (RMS:{:.4f})'.format(
                data['Ruland'],data['Sample Bkg.']['Mult'],rms))
        else:
            G2fil.G2Print('  start: Flat Bkg={:.1f}, BackRatio={:.3f}, Ruland={:.3f} (RMS:{:.4f})'.format(
                data['Flat Bkg'],data['BackRatio'],data['Ruland'],rms))
    if data['Sample Bkg.'].get('Refine',False):
        res = opt.minimize(Min,xstart,bounds=([0.01,1.],[1.2*bakMul,0.8*bakMul]),
            method='L-BFGS-B',options={'maxiter':maxCycles},tol=0.001)
    else:
        res = opt.minimize(Min,xstart,bounds=([0.,None],[0,1],[0.01,1.]),
            method='L-BFGS-B',options={'maxiter':maxCycles},tol=0.001)
    Done(res['x'])
    if showFit:
        if res['success']:
            msg = 'Converged'
        else:
            msg = 'Not Converged'
        if data['Sample Bkg.'].get('Refine',False):
            G2fil.G2Print('  end: Ruland={:.3f}, Sample Bkg mult={:.3f} (RMS:{:.4f}) *** {} ***\n'.format(
                data['Ruland'],data['Sample Bkg.']['Mult'],res['fun'],msg))
        else:
            G2fil.G2Print('  end: Flat Bkg={:.1f}, BackRatio={:.3f}, Ruland={:.3f} (RMS:{:.4f}) *** {} ***\n'.format(
                data['Flat Bkg'],data['BackRatio'],data['Ruland'],res['fun'],msg))
    return res

---|
def SetupPDFEval(data,xydata,limits,inst,numbDen):
    '''Create the objective function and helper routines used by :func:`OptimizePDF`;
    returns the (Min, Init, Done) closures that evaluate, initialize and store the
    scaled correction parameters.
    '''
    Data = copy.deepcopy(data)
    BkgMax = 1.
    def EvalLowPDF(arg):
        '''Objective routine -- evaluates the RMS deviations in G(r)
        from -4(pi)*#density*r for r<Rmin
        arguments are ['Flat Bkg','BackRatio','Ruland'] scaled so that
        the min & max values are between 0 and 1.
        '''
        if Data['Sample Bkg.'].get('Refine',False):
            R,S = arg
            Data['Sample Bkg.']['Mult'] = S
        else:
            F,B,R = arg
            Data['Flat Bkg'] = BkgMax*(2.*F-1.)
            Data['BackRatio'] = B
        Data['Ruland'] = R
        CalcPDF(Data,inst,limits,xydata)
        # test low r computation
        g = xydata['GofR'][1][1]
        r = xydata['GofR'][1][0]
        g0 = g[r < Data['Rmin']] + 4*np.pi*r[r < Data['Rmin']]*numbDen
        M = sum(g0**2)/len(g0)
        return M
    def GetCurrentVals():
        '''Get the current ['Flat Bkg','BackRatio','Ruland'] with scaling
        '''
        if data['Sample Bkg.'].get('Refine',False):
            return [max(data['Ruland'],.05),data['Sample Bkg.']['Mult']]
        try:
            F = 0.5+0.5*data['Flat Bkg']/BkgMax
        except:
            F = 0
        return [F,data['BackRatio'],max(data['Ruland'],.05)]
    def SetFinalVals(arg):
        '''Set the 'Flat Bkg', 'BackRatio' & 'Ruland' values from the
        scaled, refined values and plot corrected region of G(r)
        '''
        if data['Sample Bkg.'].get('Refine',False):
            R,S = arg
            data['Sample Bkg.']['Mult'] = S
        else:
            F,B,R = arg
            data['Flat Bkg'] = BkgMax*(2.*F-1.)
            data['BackRatio'] = B
        data['Ruland'] = R
        CalcPDF(data,inst,limits,xydata)
    EvalLowPDF(GetCurrentVals())
    BkgMax = max(xydata['IofQ'][1][1])/50.
    return EvalLowPDF,GetCurrentVals,SetFinalVals

---|
#### GSASII convolution peak fitting routines: Finger, Cox & Jephcoat model
def factorize(num):
    ''' Provide prime number factors for integer num
    :returns: dictionary of prime factors (keys) & power for each (data)
    '''
    factors = {}
    orig = num

    # we take advantage of the fact that (i+1)**2 = i**2 + 2*i + 1
    i, sqi = 2, 4
    while sqi <= num:
        while not num%i:
            num //= i       #integer division keeps the factor keys as integers
            factors[i] = factors.get(i, 0) + 1

        sqi += 2*i + 1
        i += 1

    if num != 1 and num != orig:
        factors[num] = factors.get(num, 0) + 1

    if factors:
        return factors
    else:
        return {num:1}  #a prime number!

def makeFFTsizeList(nmin=1,nmax=1023,thresh=15):
    ''' Provide list of optimal data sizes for FFT calculations

    :param int nmin: minimum data size >= 1
    :param int nmax: maximum data size > nmin
    :param int thresh: maximum prime factor allowed
    :Returns: list of data sizes where the maximum prime factor is < thresh
    '''
    plist = []
    nmin = max(1,nmin)
    nmax = max(nmin+1,nmax)
    for p in range(nmin,nmax):
        if max(list(factorize(p).keys())) < thresh:
            plist.append(p)
    return plist

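# Illustrative sketch (not part of the original module): factorize a convenient size and
# list FFT-friendly lengths between 1000 and 1024 (maximum prime factor below 15).
def _example_fft_sizes():
    '''Illustrative only; e.g. factorize(1000) returns {2: 3, 5: 3}.'''
    return factorize(1000),makeFFTsizeList(1000,1024,thresh=15)
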
---|
np.seterr(divide='ignore')

# Normal distribution

# loc = mu, scale = std
_norm_pdf_C = 1./math.sqrt(2*math.pi)
class norm_gen(st.rv_continuous):
    '''Gaussian distribution with the pdf evaluated directly; loc is the mean and
    scale is the standard deviation. Used to build convolution peak profiles.
    '''

    def pdf(self,x,*args,**kwds):
        loc,scale=kwds['loc'],kwds['scale']
        x = (x-loc)/scale
        return np.exp(-x**2/2.0) * _norm_pdf_C / scale

norm = norm_gen(name='norm',longname='A normal',extradoc="""

Normal distribution

The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.

normal.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
""")

## Cauchy

# median = loc

class cauchy_gen(st.rv_continuous):
    '''Cauchy (Lorentzian) distribution with the pdf evaluated directly; loc is the
    median and scale is the half-width. Used to build convolution peak profiles.
    '''

    def pdf(self,x,*args,**kwds):
        loc,scale=kwds['loc'],kwds['scale']
        x = (x-loc)/scale
        return 1.0/np.pi/(1.0+x*x) / scale

cauchy = cauchy_gen(name='cauchy',longname='Cauchy',extradoc="""

Cauchy distribution

cauchy.pdf(x) = 1/(pi*(1+x**2))

This is the t distribution with one degree of freedom.
""")


class fcjde_gen(st.rv_continuous):
    """
    Finger-Cox-Jephcoat D(2phi,2th) function for S/L = H/L
    Ref: J. Appl. Cryst. (1994) 27, 892-900.

    :param x: array -1 to 1
    :param t: 2-theta position of peak
    :param s: sum(S/L,H/L); S: sample height, H: detector opening,
      L: sample to detector opening distance
    :param dx: 2-theta step size in deg

    :returns: for fcj.pdf

     * T = x*dx+t
     * s = S/L+H/L
     * if x < 0::

        fcj.pdf = [1/sqrt({cos(T)**2/cos(t)**2}-1) - 1/s]/|cos(T)|

     * if x >= 0: fcj.pdf = 0

    """
    def _pdf(self,x,t,s,dx):
        T = dx*x+t
        ax2 = abs(npcosd(T))
        ax = ax2**2
        bx = npcosd(t)**2
        bx = np.where(ax>bx,bx,ax)
        fx = np.where(ax>bx,(np.sqrt(bx/(ax-bx))-1./s)/ax2,0.0)
        fx = np.where(fx > 0.,fx,0.0)
        return fx

    def pdf(self,x,*args,**kwds):
        loc=kwds['loc']
        return self._pdf(x-loc,*args)

fcjde = fcjde_gen(name='fcjde',shapes='t,s,dx')

def getFCJVoigt(pos,intens,sig,gam,shl,xdata):
    '''Compute the Finger-Cox-Jephcoat modified Voigt function for a
    CW powder peak by direct convolution. This version is not used.
    '''
    DX = xdata[1]-xdata[0]
    widths,fmin,fmax = getWidthsCW(pos,sig,gam,shl)
    x = np.linspace(pos-fmin,pos+fmin,256)
    dx = x[1]-x[0]
    Norm = norm.pdf(x,loc=pos,scale=widths[0])
    Cauchy = cauchy.pdf(x,loc=pos,scale=widths[1])
    arg = [pos,shl/57.2958,dx,]
    FCJ = fcjde.pdf(x,*arg,loc=pos)
    if len(np.nonzero(FCJ)[0])>5:
        z = np.column_stack([Norm,Cauchy,FCJ]).T
        Z = fft.fft(z)
        Df = fft.ifft(Z.prod(axis=0)).real
    else:
        z = np.column_stack([Norm,Cauchy]).T
        Z = fft.fft(z)
        Df = fft.fftshift(fft.ifft(Z.prod(axis=0))).real
    Df /= np.sum(Df)
    Df = si.interp1d(x,Df,bounds_error=False,fill_value=0.0)
    return intens*Df(xdata)*DX/dx

#### GSASII peak fitting routine: Finger, Cox & Jephcoat model

def getWidthsCW(pos,sig,gam,shl):
    '''Compute the peak widths used for computing the range of a peak
    for constant wavelength data. On the low-angle side, 50 FWHM are used,
    on the high-angle side 75 FWHM are used, extended for axial divergence
    (for peaks above 90 deg, these are reversed.)

    :param pos: peak position; 2-theta in degrees
    :param sig: Gaussian peak variance in centideg^2
    :param gam: Lorentzian peak width in centidegrees
    :param shl: axial divergence parameter (S+H)/L

    :returns: widths; [Gaussian sigma, Lorentzian gamma] in degrees, and
      low angle, high angle ends of peak; 50 FWHM & 75 FWHM from position,
      reversed for 2-theta > 90 deg.
    '''
    widths = [np.sqrt(sig)/100.,gam/100.]
    fwhm = 2.355*widths[0]+widths[1]
    fmin = 50.*(fwhm+shl*abs(npcosd(pos)))
    fmax = 75.0*fwhm
    if pos > 90:
        fmin,fmax = [fmax,fmin]
    return widths,fmin,fmax

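# Illustrative sketch (not part of the original module): peak-range computation for a
# CW peak at 40 deg 2-theta; the sig, gam and S/L+H/L values are assumptions.
def _example_peak_widths():
    '''Illustrative only; returns ([sigma, gamma] in deg, low-side extent, high-side extent).'''
    widths,fmin,fmax = getWidthsCW(40.0,4.0,2.0,0.002)
    return widths,fmin,fmax
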
---|
def getWidthsED(pos,sig):
    '''Compute the peak widths used for computing the range of a peak
    for energy dispersive data. 5 FWHM are used on both the low-energy
    and high-energy sides.

    :param pos: peak position; energy in keV (not used)
    :param sig: Gaussian peak variance in keV^2

    :returns: widths; [Gaussian sigma, dummy gamma] in keV, and
      low energy, high energy ends of peak; 5 FWHM from position
    '''
    widths = [np.sqrt(sig),.001]
    fwhm = 2.355*widths[0]
    fmin = 5.*fwhm
    fmax = 5.*fwhm
    return widths,fmin,fmax

def getWidthsTOF(pos,alp,bet,sig,gam):
    '''Compute the peak widths used for computing the range of a peak
    for time-of-flight data. 50 FWHM are used on both sides, each
    extended by the exponential rise/decay coefficients.

    :param pos: peak position; TOF in musec (not used)
    :param alp,bet: TOF peak exponential rise & decay parameters
    :param sig: Gaussian peak variance in musec^2
    :param gam: Lorentzian peak width in musec

    :returns: widths; [Gaussian sigma, Lorentzian gamma] in musec, and
      low TOF, high TOF ends of peak; 50 FWHM from position
    '''
    widths = [np.sqrt(sig),gam]
    fwhm = 2.355*widths[0]+2.*widths[1]
    fmin = 50.*fwhm*(1.+1./alp)
    fmax = 50.*fwhm*(1.+1./bet)
    return widths,fmin,fmax

def getFWHM(pos,Inst,N=1):
    '''Compute total FWHM from Thompson, Cox & Hastings (1987), J. Appl. Cryst. 20, 79-83
    via getgamFW(g,s).

    :param pos: float peak position in deg 2-theta or tof in musec
    :param Inst: dict instrument parameters
    :param N: int Inst index (0 for input, 1 for fitted)

    :returns float: total FWHM of pseudoVoigt in deg or musec
    '''

    sig = lambda Th,U,V,W: np.sqrt(max(0.001,U*tand(Th)**2+V*tand(Th)+W))
    sigED = lambda E,A,B,C: np.sqrt(max(0.001,A*E**2+B*E+C))
    sigTOF = lambda dsp,S0,S1,S2,Sq: np.sqrt(S0+S1*dsp**2+S2*dsp**4+Sq*dsp)
    gam = lambda Th,X,Y,Z: Z+X/cosd(Th)+Y*tand(Th)
    gamTOF = lambda dsp,X,Y,Z: Z+X*dsp+Y*dsp**2
    alpTOF = lambda dsp,alp: alp/dsp
    betTOF = lambda dsp,bet0,bet1,betq: bet0+bet1/dsp**4+betq/dsp**2
    alpPink = lambda pos,alp0,alp1: alp0+alp1*tand(pos/2.)
    betPink = lambda pos,bet0,bet1: bet0+bet1*tand(pos/2.)
    if 'LF' in Inst['Type'][0]:
        return 3
    elif 'T' in Inst['Type'][0]:
        dsp = pos/Inst['difC'][N]
        alp = alpTOF(dsp,Inst['alpha'][N])
        bet = betTOF(dsp,Inst['beta-0'][1],Inst['beta-1'][N],Inst['beta-q'][N])
        s = sigTOF(dsp,Inst['sig-0'][N],Inst['sig-1'][N],Inst['sig-2'][N],Inst['sig-q'][N])
        g = gamTOF(dsp,Inst['X'][N],Inst['Y'][N],Inst['Z'][N])
        return getgamFW(g,s)+np.log(2.0)*(alp+bet)/(alp*bet)
    elif 'C' in Inst['Type'][0]:
        s = sig(pos/2.,Inst['U'][N],Inst['V'][N],Inst['W'][N])
        g = gam(pos/2.,Inst['X'][N],Inst['Y'][N],Inst['Z'][N])
        return getgamFW(g,s)/100.  #returns FWHM in deg
    elif 'E' in Inst['Type'][0]:
        s = sigED(pos,Inst['A'][N],Inst['B'][N],Inst['C'][N])
        return 2.35482*s
    else:   #'B'
        alp = alpPink(pos,Inst['alpha-0'][N],Inst['alpha-1'][N])
        bet = betPink(pos,Inst['beta-0'][N],Inst['beta-1'][N])
        s = sig(pos/2.,Inst['U'][N],Inst['V'][N],Inst['W'][N])
        g = gam(pos/2.,Inst['X'][N],Inst['Y'][N],Inst['Z'][N])
        return getgamFW(g,s)/100.+np.log(2.0)*(alp+bet)/(alp*bet)  #returns FWHM in deg

def getgamFW(g,s):
    '''Compute total FWHM from Thompson, Cox & Hastings (1987), J. Appl. Cryst. 20, 79-83
    lambda fxn needs FWHM for both Gaussian & Lorentzian components

    :param g: float Lorentzian gamma = FWHM(L)
    :param s: float Gaussian sig

    :returns float: total FWHM of pseudoVoigt
    '''
    gamFW = lambda s,g: np.exp(np.log(s**5+2.69269*s**4*g+2.42843*s**3*g**2+4.47163*s**2*g**3+0.07842*s*g**4+g**5)/5.)
    return gamFW(2.35482*s,g)   #sqrt(8ln2)*sig = FWHM(G)

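# Illustrative sketch (not part of the original module): combine an assumed Gaussian
# sigma of 0.05 deg with an assumed Lorentzian FWHM of 0.03 deg using the TCH expression above.
def _example_tch_fwhm():
    '''Illustrative only; getgamFW expects (Lorentzian FWHM, Gaussian sigma).'''
    return getgamFW(0.03,0.05)
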
---|
def getBackground(pfx,parmDict,bakType,dataType,xdata,fixback=None):
    '''Computes the background based on parameters that may be taken from
    a gpx file or the data tree.

    :param str pfx: histogram prefix (:h:)
    :param dict parmDict: Refinement parameter values
    :param str bakType: defines background function to be used. Should be
      one of these: 'chebyschev', 'cosine', 'chebyschev-1',
      'Q^2 power series', 'Q^-2 power series', 'lin interpolate',
      'inv interpolate', 'log interpolate'
    :param str dataType: Code to indicate histogram type (PXC, PNC, PNT,...)
    :param MaskedArray xdata: independent variable, 2theta (deg*100) or
      TOF (microsec?)
    :param numpy.array fixback: Array of fixed background points (length
      matching xdata) or None

    :returns: yb,sumBk where yb is an array of background values (length
      matching xdata) and sumBk is a list with three values. The sumBk[0] is
      the sum of all yb values, sumBk[1] is the sum of Debye background terms
      and sumBk[2] is the sum of background peaks.
    '''
    if 'T' in dataType:
        q = 2.*np.pi*parmDict[pfx+'difC']/xdata
    elif 'E' in dataType:
        const = 4.*np.pi*npsind(parmDict[pfx+'2-theta']/2.0)
        q = const*xdata
    else:
        wave = parmDict.get(pfx+'Lam',parmDict.get(pfx+'Lam1',1.0))
        q = npT2q(xdata,wave)
    yb = np.zeros_like(xdata)
    nBak = 0
    sumBk = [0.,0.,0]
    while True:
        key = pfx+'Back;'+str(nBak)
        if key in parmDict:
            nBak += 1
        else:
            break
    #empirical functions
    if bakType in ['chebyschev','cosine','chebyschev-1']:
        dt = xdata[-1]-xdata[0]
        for iBak in range(nBak):
            key = pfx+'Back;'+str(iBak)
            if bakType == 'chebyschev':
                ybi = parmDict[key]*(-1.+2.*(xdata-xdata[0])/dt)**iBak
            elif bakType == 'chebyschev-1':
                xpos = -1.+2.*(xdata-xdata[0])/dt
                ybi = parmDict[key]*np.cos(iBak*np.arccos(xpos))
            elif bakType == 'cosine':
                ybi = parmDict[key]*npcosd(180.*xdata*iBak/xdata[-1])
            yb += ybi
        sumBk[0] = np.sum(yb)
    elif bakType in ['Q^2 power series','Q^-2 power series']:
        QT = 1.
        yb += np.ones_like(yb)*parmDict[pfx+'Back;0']
        for iBak in range(nBak-1):
            key = pfx+'Back;'+str(iBak+1)
            if '-2' in bakType:
                QT *= (iBak+1)*q**-2
            else:
                QT *= q**2/(iBak+1)
            yb += QT*parmDict[key]
        sumBk[0] = np.sum(yb)
    elif bakType in ['lin interpolate','inv interpolate','log interpolate',]:
        if nBak == 1:
            yb = np.ones_like(xdata)*parmDict[pfx+'Back;0']
        elif nBak == 2:
            dX = xdata[-1]-xdata[0]
            T2 = (xdata-xdata[0])/dX
            T1 = 1.0-T2
            yb = parmDict[pfx+'Back;0']*T1+parmDict[pfx+'Back;1']*T2
        else:
            xnomask = ma.getdata(xdata)
            xmin,xmax = xnomask[0],xnomask[-1]
            if bakType == 'lin interpolate':
                bakPos = np.linspace(xmin,xmax,nBak,True)
            elif bakType == 'inv interpolate':
                bakPos = 1./np.linspace(1./xmax,1./xmin,nBak,True)
            elif bakType == 'log interpolate':
                bakPos = np.exp(np.linspace(np.log(xmin),np.log(xmax),nBak,True))
            bakPos[0] = xmin
            bakPos[-1] = xmax
            bakVals = np.zeros(nBak)
            for i in range(nBak):
                bakVals[i] = parmDict[pfx+'Back;'+str(i)]
            bakInt = si.interp1d(bakPos,bakVals,'linear')
            yb = bakInt(ma.getdata(xdata))
        sumBk[0] = np.sum(yb)
    #Debye function
    if pfx+'difC' in parmDict or 'E' in dataType:
        ff = 1.
    else:
        try:
            wave = parmDict[pfx+'Lam']
        except KeyError:
            wave = parmDict[pfx+'Lam1']
        SQ = (q/(4.*np.pi))**2
        FF = G2elem.GetFormFactorCoeff('Si')[0]
        ff = np.array(G2elem.ScatFac(FF,SQ)[0])**2
    iD = 0
    while True:
        try:
            dbA = parmDict[pfx+'DebyeA;'+str(iD)]
            dbR = parmDict[pfx+'DebyeR;'+str(iD)]
            dbU = parmDict[pfx+'DebyeU;'+str(iD)]
            ybi = ff*dbA*np.sin(q*dbR)*np.exp(-dbU*q**2)/(q*dbR)
            yb += ybi
            sumBk[1] += np.sum(ybi)
            iD += 1
        except KeyError:
            break
    #peaks
    iD = 0
    while True:
        try:
            pkP = parmDict[pfx+'BkPkpos;'+str(iD)]
            pkI = max(parmDict[pfx+'BkPkint;'+str(iD)],0.1)
            pkS = max(parmDict[pfx+'BkPksig;'+str(iD)],0.01)
            pkG = max(parmDict[pfx+'BkPkgam;'+str(iD)],0.1)
            if 'C' in dataType:
                Wd,fmin,fmax = getWidthsCW(pkP,pkS,pkG,.002)
            elif 'E' in dataType:
                Wd,fmin,fmax = getWidthsED(pkP,pkS)
            else: #'T'OF
                Wd,fmin,fmax = getWidthsTOF(pkP,1.,1.,pkS,pkG)
            iBeg = np.searchsorted(xdata,pkP-fmin)
            iFin = np.searchsorted(xdata,pkP+fmax)
            lenX = len(xdata)
            if not iBeg:
                iFin = np.searchsorted(xdata,pkP+fmax)
            elif iBeg == lenX:
                iFin = iBeg
            else:
                iFin = np.searchsorted(xdata,pkP+fmax)
            if 'C' in dataType:
                ybi = pkI*getFCJVoigt3(pkP,pkS,pkG,0.002,xdata[iBeg:iFin])[0]
            elif 'T' in dataType:
                ybi = pkI*getEpsVoigt(pkP,1.,1.,pkS,pkG,xdata[iBeg:iFin])[0]
            elif 'B' in dataType:
                ybi = pkI*getEpsVoigt(pkP,1.,1.,pkS/100.,pkG/1.e4,xdata[iBeg:iFin])[0]
            elif 'E' in dataType:
                ybi = pkI*getPsVoigt(pkP,pkS*10.**4,pkG*100.,xdata[iBeg:iFin])[0]
            else:
                raise Exception('dataType of {:} should not happen!'.format(dataType))
            yb[iBeg:iFin] += ybi
            sumBk[2] += np.sum(ybi)
            iD += 1
        except KeyError:
            break
        except ValueError:
            G2fil.G2Print ('**** WARNING - background peak '+str(iD)+' sigma is negative; fix & try again ****')
            break
    if fixback is not None:
        yb += parmDict[pfx+'BF mult']*fixback
        sumBk[0] = sum(yb)
    return yb,sumBk

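# Illustrative sketch (not part of the original module): evaluate a two-term 'chebyschev'
# background over a small 2-theta grid. The ':0:' histogram prefix and all parameter
# values here are assumptions used only to show the parameter-naming convention.
def _example_background():
    '''Illustrative only; returns (background curve, [total, Debye, peak] sums).'''
    xdata = ma.array(np.linspace(10.,100.,10))
    parmDict = {':0:Back;0':100.,':0:Back;1':-5.,':0:Lam':1.5406}
    yb,sumBk = getBackground(':0:',parmDict,'chebyschev','PXC',xdata)
    return yb,sumBk
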
---|
def getBackgroundDerv(hfx,parmDict,bakType,dataType,xdata,fixback=None):
    '''Computes the derivatives of the background.
    Parameters passed to this may be pulled from gpx file or data tree.
    See :func:`getBackground` for parameter definitions.

    :returns: dydb,dyddb,dydpk,dydfb where the first three are 2-D arrays
      of derivatives with respect to the background terms, the Debye terms and
      the background peak terms vs. the points in the diffraction pattern. The
      final 1D array is the derivative with respect to the fixed-background
      multiplier (= the fixed background values).
    '''
    if 'T' in dataType:
        q = 2.*np.pi*parmDict[hfx+'difC']/xdata
    elif 'E' in dataType:
        const = 4.*np.pi*npsind(parmDict[hfx+'2-theta']/2.0)
        q = const*xdata
    else:
        wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
        q = 2.*np.pi*npsind(xdata/2.)/wave
    nBak = 0
    while True:
        key = hfx+'Back;'+str(nBak)
        if key in parmDict:
            nBak += 1
        else:
            break
    dydb = np.zeros(shape=(nBak,len(xdata)))
    dyddb = np.zeros(shape=(3*parmDict[hfx+'nDebye'],len(xdata)))
    dydpk = np.zeros(shape=(4*parmDict[hfx+'nPeaks'],len(xdata)))
    dydfb = []

    if bakType in ['chebyschev','cosine','chebyschev-1']:
        dt = xdata[-1]-xdata[0]
        for iBak in range(nBak):
            if bakType == 'chebyschev':
                dydb[iBak] = (-1.+2.*(xdata-xdata[0])/dt)**iBak
            elif bakType == 'chebyschev-1':
                xpos = -1.+2.*(xdata-xdata[0])/dt
                dydb[iBak] = np.cos(iBak*np.arccos(xpos))
            elif bakType == 'cosine':
                dydb[iBak] = npcosd(180.*xdata*iBak/xdata[-1])
    elif bakType in ['Q^2 power series','Q^-2 power series']:
        QT = 1.
        dydb[0] = np.ones_like(xdata)
        for iBak in range(nBak-1):
            if '-2' in bakType:
                QT *= (iBak+1)*q**-2
            else:
                QT *= q**2/(iBak+1)
            dydb[iBak+1] = QT
    elif bakType in ['lin interpolate','inv interpolate','log interpolate',]:
        if nBak == 1:
            dydb[0] = np.ones_like(xdata)
        elif nBak == 2:
            dX = xdata[-1]-xdata[0]
            T2 = (xdata-xdata[0])/dX
            T1 = 1.0-T2
            dydb = [T1,T2]
        else:
            xnomask = ma.getdata(xdata)
            xmin,xmax = xnomask[0],xnomask[-1]
            if bakType == 'lin interpolate':
                bakPos = np.linspace(xmin,xmax,nBak,True)
            elif bakType == 'inv interpolate':
                bakPos = 1./np.linspace(1./xmax,1./xmin,nBak,True)
            elif bakType == 'log interpolate':
                bakPos = np.exp(np.linspace(np.log(xmin),np.log(xmax),nBak,True))
            bakPos[0] = xmin
            bakPos[-1] = xmax
            for i,pos in enumerate(bakPos):
                if i == 0:
                    dydb[0] = np.where(xdata<bakPos[1],(bakPos[1]-xdata)/(bakPos[1]-bakPos[0]),0.)
                elif i == len(bakPos)-1:
                    dydb[i] = np.where(xdata>bakPos[-2],(bakPos[-1]-xdata)/(bakPos[-1]-bakPos[-2]),0.)
                else:
                    dydb[i] = np.where(xdata>bakPos[i],
                        np.where(xdata<bakPos[i+1],(bakPos[i+1]-xdata)/(bakPos[i+1]-bakPos[i]),0.),
                        np.where(xdata>bakPos[i-1],(xdata-bakPos[i-1])/(bakPos[i]-bakPos[i-1]),0.))
    if hfx+'difC' in parmDict:
        ff = 1.
    else:
        wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
        q = npT2q(xdata,wave)
        SQ = (q/(4*np.pi))**2
        FF = G2elem.GetFormFactorCoeff('Si')[0]
        ff = np.array(G2elem.ScatFac(FF,SQ)[0])*np.pi**2    #needs pi^2~10. for cw data (why?)
    iD = 0
    while True:
        try:
            if hfx+'difC' in parmDict:
                q = 2*np.pi*parmDict[hfx+'difC']/xdata
            dbA = parmDict[hfx+'DebyeA;'+str(iD)]
            dbR = parmDict[hfx+'DebyeR;'+str(iD)]
            dbU = parmDict[hfx+'DebyeU;'+str(iD)]
            sqr = np.sin(q*dbR)/(q*dbR)
            cqr = np.cos(q*dbR)
            temp = np.exp(-dbU*q**2)
            dyddb[3*iD] = ff*sqr*temp
            dyddb[3*iD+1] = ff*dbA*temp*(cqr-sqr)/(dbR)
            dyddb[3*iD+2] = -ff*dbA*sqr*temp*q**2
            iD += 1
        except KeyError:
            break
    iD = 0
    while True:
        try:
            pkP = parmDict[hfx+'BkPkpos;'+str(iD)]
            pkI = max(parmDict[hfx+'BkPkint;'+str(iD)],0.1)
            pkS = max(parmDict[hfx+'BkPksig;'+str(iD)],0.01)
            pkG = max(parmDict[hfx+'BkPkgam;'+str(iD)],0.1)
            if 'C' in dataType:
                Wd,fmin,fmax = getWidthsCW(pkP,pkS,pkG,.002)
            elif 'E' in dataType:
                Wd,fmin,fmax = getWidthsED(pkP,pkS)
            else: #'T' or 'B'
                Wd,fmin,fmax = getWidthsTOF(pkP,1.,1.,pkS,pkG)
            iBeg = np.searchsorted(xdata,pkP-fmin)
            iFin = np.searchsorted(xdata,pkP+fmax)
            lenX = len(xdata)
            if not iBeg:
                iFin = np.searchsorted(xdata,pkP+fmax)
            elif iBeg == lenX:
                iFin = iBeg
            else:
                iFin = np.searchsorted(xdata,pkP+fmax)
            if 'C' in dataType:
                Df,dFdp,dFds,dFdg,x = getdFCJVoigt3(pkP,pkS,pkG,.002,xdata[iBeg:iFin])
            elif 'E' in dataType:
                Df,dFdp,dFds,dFdg = getdPsVoigt(pkP,pkS*10.**4,pkG*100.,xdata[iBeg:iFin])
            else: #'T'OF
                Df,dFdp,x,x,dFds,dFdg = getdEpsVoigt(pkP,1.,1.,pkS,pkG,xdata[iBeg:iFin])
            dydpk[4*iD][iBeg:iFin] += pkI*dFdp
            dydpk[4*iD+1][iBeg:iFin] += Df
            dydpk[4*iD+2][iBeg:iFin] += pkI*dFds
            dydpk[4*iD+3][iBeg:iFin] += pkI*dFdg
            iD += 1
        except KeyError:
            break
        except ValueError:
            G2fil.G2Print ('**** WARNING - background peak '+str(iD)+' sigma is negative; fix & try again ****')
            break
    # fixed background from file
    if fixback is not None:
        dydfb = fixback
    return dydb,dyddb,dydpk,dydfb

---|
1206 | |
---|
1207 | #### Using old gsas fortran routines for powder peak shapes & derivatives |
---|
1208 | def getFCJVoigt3(pos,sig,gam,shl,xdata): |
---|
1209 | '''Compute the Finger-Cox-Jepcoat modified Pseudo-Voigt function for a |
---|
1210 | CW powder peak in external Fortran routine |
---|
1211 | |
---|
1212 | param pos: peak position in degrees |
---|
1213 | param sig: Gaussian variance in centideg^2 |
---|
1214 | param gam: Lorentzian width in centideg |
---|
1215 | param shl: axial divergence parameter (S+H)/L |
---|
1216 | param xdata: array; profile points for peak to be calculated; bounded by 20FWHM to 50FWHM (or vv) |
---|
1217 | |
---|
1218 | returns: array: calculated peak function at each xdata |
---|
1219 | returns: integral of peak; nominally = 1.0 |
---|
1220 | ''' |
---|
1221 | if len(xdata): |
---|
1222 | cw = np.diff(xdata) |
---|
1223 | cw = np.append(cw,cw[-1]) |
---|
1224 | Df = pyd.pypsvfcj(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
1225 | return Df,np.sum(100.*Df*cw) |
---|
1226 | else: |
---|
1227 | return 0.,1. |
---|
1228 | |
---|
1229 | def getdFCJVoigt3(pos,sig,gam,shl,xdata): |
---|
1230 | '''Compute analytic derivatives the Finger-Cox-Jepcoat modified Pseudo-Voigt |
---|
1231 | function for a CW powder peak |
---|
1232 | |
---|
1233 | param pos: peak position in degrees |
---|
1234 | param sig: Gaussian variance in centideg^2 |
---|
1235 | param gam: Lorentzian width in centideg |
---|
1236 | param shl: axial divergence parameter (S+H)/L |
---|
1237 | param xdata: array; profile points for peak to be calculated; bounded by 20FWHM to 50FWHM (or vv) |
---|
1238 | |
---|
1239 | returns: arrays: function and derivatives of pos, sig, gam, & shl |
---|
1240 | ''' |
---|
1241 | Df,dFdp,dFds,dFdg,dFdsh = pyd.pydpsvfcj(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
1242 | return Df,dFdp,dFds,dFdg,dFdsh |
---|
1243 | |
---|
1244 | def getPsVoigt(pos,sig,gam,xdata): |
---|
1245 | '''Compute the simple Pseudo-Voigt function for a |
---|
1246 | small angle Bragg peak in external Fortran routine |
---|
1247 | |
---|
1248 | :param pos: peak position in degrees
---|
1249 | :param sig: Gaussian variance in centideg^2
---|
1250 | :param gam: Lorentzian width in centideg
---|
:param xdata: array; profile points at which the peak is calculated
---|
1251 | 
---|
1252 | :returns: array: calculated peak function at each xdata point
---|
1253 | :returns: integral of peak; nominally = 1.0
---|
1254 | ''' |
---|
1255 | |
---|
1256 | cw = np.diff(xdata) |
---|
1257 | cw = np.append(cw,cw[-1]) |
---|
1258 | Df = pyd.pypsvoigt(len(xdata),xdata-pos,sig,gam) |
---|
1259 | return Df,np.sum(100.*Df*cw) |
---|
1260 | |
---|
1261 | def getdPsVoigt(pos,sig,gam,xdata): |
---|
1262 | '''Compute the simple Pseudo-Voigt function derivatives for a |
---|
1263 | small angle Bragg peak in external Fortran routine
---|
1264 | |
---|
1265 | :param pos: peak position in degrees
---|
1266 | :param sig: Gaussian variance in centideg^2
---|
1267 | :param gam: Lorentzian width in centideg
---|
:param xdata: array; profile points at which the peak is calculated
---|
1268 | 
---|
1269 | :returns: arrays: function and derivatives with respect to pos, sig & gam
---|
1270 | NB: the pos derivative has the opposite sign compared to that in other profile functions
---|
1271 | ''' |
---|
1272 | |
---|
1273 | Df,dFdp,dFds,dFdg = pyd.pydpsvoigt(len(xdata),xdata-pos,sig,gam) |
---|
1274 | return Df,dFdp,dFds,dFdg |
---|
1275 | |
---|
1276 | def getEpsVoigt(pos,alp,bet,sig,gam,xdata): |
---|
1277 | '''Compute the double exponential Pseudo-Voigt convolution function for a |
---|
1278 | neutron TOF & CW pink peak in external Fortran routine |
---|
1279 | ''' |
---|
1280 | |
---|
1281 | cw = np.diff(xdata) |
---|
1282 | cw = np.append(cw,cw[-1]) |
---|
1283 | Df = pyd.pyepsvoigt(len(xdata),xdata-pos,alp,bet,sig,gam) |
---|
1284 | return Df,np.sum(Df*cw) |
---|
1285 | |
---|
1286 | def getdEpsVoigt(pos,alp,bet,sig,gam,xdata): |
---|
1287 | '''Compute the double exponential Pseudo-Voigt convolution function derivatives for a |
---|
1288 | neutron TOF & CW pink peak in external Fortran routine |
---|
1289 | ''' |
---|
1290 | |
---|
1291 | Df,dFdp,dFda,dFdb,dFds,dFdg = pyd.pydepsvoigt(len(xdata),xdata-pos,alp,bet,sig,gam) |
---|
1292 | return Df,dFdp,dFda,dFdb,dFds,dFdg |
---|
1293 | |
---|
1294 | def ellipseSize(H,Sij,GB): |
---|
1295 | '''Implements r = 1/sqrt(sum((1/S)*(q.v)^2)) per note from Alexander Brady
---|
1296 | ''' |
---|
1297 | |
---|
1298 | HX = np.inner(H.T,GB) |
---|
1299 | lenHX = np.sqrt(np.sum(HX**2)) |
---|
1300 | Esize,Rsize = nl.eigh(G2lat.U6toUij(Sij)) |
---|
1301 | R = np.inner(HX/lenHX,Rsize)**2*Esize #want column length for hkl in crystal |
---|
1302 | lenR = 1./np.sqrt(np.sum(R)) |
---|
1303 | return lenR |
---|
1304 | |
---|
1305 | def ellipseSizeDerv(H,Sij,GB): |
---|
1306 | '''Implements the derivative of r = 1/sqrt(sum((1/S)*(q.v)^2)) per note from Alexander Brady
---|
1307 | ''' |
---|
1308 | |
---|
1309 | lenR = ellipseSize(H,Sij,GB) |
---|
1310 | delt = 0.001 |
---|
1311 | dRdS = np.zeros(6) |
---|
1312 | for i in range(6): |
---|
1313 | Sij[i] -= delt |
---|
1314 | lenM = ellipseSize(H,Sij,GB) |
---|
1315 | Sij[i] += 2.*delt |
---|
1316 | lenP = ellipseSize(H,Sij,GB) |
---|
1317 | Sij[i] -= delt |
---|
1318 | dRdS[i] = (lenP-lenM)/(2.*delt) |
---|
1319 | return lenR,dRdS |
---|
1320 | |
---|
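# ellipseSizeDerv above uses a symmetric (central) finite difference with a fixed
# step of 0.001 on each Sij term. The generic sketch below restates the same
# scheme for any single-argument function; 'func' and 'x' are hypothetical
# stand-ins for ellipseSize and one Sij component.
def _demo_central_difference(func,x,delt=0.001):
    return (func(x+delt)-func(x-delt))/(2.*delt)   # df/dx ~ (f(x+d)-f(x-d))/(2d)
# e.g. _demo_central_difference(np.sin,0.3) is close to np.cos(0.3)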
1321 | def getMustrain(HKL,G,SGData,muStrData): |
---|
1322 | if muStrData[0] == 'isotropic': |
---|
1323 | return np.ones(HKL.shape[1])*muStrData[1][0] |
---|
1324 | elif muStrData[0] == 'uniaxial': |
---|
1325 | H = np.array(HKL) |
---|
1326 | P = np.array(muStrData[3]) |
---|
1327 | cosP,sinP = np.array([G2lat.CosSinAngle(h,P,G) for h in H.T]).T |
---|
1328 | Si = muStrData[1][0] |
---|
1329 | Sa = muStrData[1][1] |
---|
1330 | return Si*Sa/(np.sqrt((Si*cosP)**2+(Sa*sinP)**2)) |
---|
1331 | else: #generalized - P.W. Stephens model |
---|
1332 | H = np.array(HKL) |
---|
1333 | rdsq = np.array([G2lat.calc_rDsq2(h,G) for h in H.T]) |
---|
1334 | Strms = np.array(G2spc.MustrainCoeff(H,SGData)) |
---|
1335 | Sum = np.sum(np.array(muStrData[4])[:,nxs]*Strms,axis=0) |
---|
1336 | return np.sqrt(Sum)/rdsq |
---|
1337 | |
---|
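# The uniaxial branches of getMustrain and getCrSize (below) use the same
# interpolation, Si*Sa/sqrt((Si*cosP)**2+(Sa*sinP)**2), where cosP/sinP describe
# the angle between the reflection and the unique axis. The sketch only restates
# that formula; note it reduces to Sa when cosP=1 and to Si when sinP=1. The
# helper name and sample values are illustrative only.
def _demo_uniaxial_value(Si,Sa,cosP,sinP):
    return Si*Sa/np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
# e.g. _demo_uniaxial_value(1000.,2000.,1.,0.) -> 2000.0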
1338 | def getCrSize(HKL,G,GB,sizeData): |
---|
1339 | if sizeData[0] == 'isotropic': |
---|
1340 | return np.ones(HKL.shape[1])*sizeData[1][0] |
---|
1341 | elif sizeData[0] == 'uniaxial': |
---|
1342 | H = np.array(HKL) |
---|
1343 | P = np.array(sizeData[3]) |
---|
1344 | cosP,sinP = np.array([G2lat.CosSinAngle(h,P,G) for h in H.T]).T |
---|
1345 | Si = sizeData[1][0] |
---|
1346 | Sa = sizeData[1][1] |
---|
1347 | return Si*Sa/(np.sqrt((Si*cosP)**2+(Sa*sinP)**2)) |
---|
1348 | else: |
---|
1349 | Sij =[sizeData[4][i] for i in range(6)] |
---|
1350 | H = np.array(HKL) |
---|
1351 | return 1./np.array([ellipseSize(h,Sij,GB) for h in H.T])**2 |
---|
1352 | |
---|
1353 | def getHKLpeak(dmin,SGData,A,Inst=None,nodup=False): |
---|
1354 | ''' |
---|
1355 | Generates the symmetry-allowed reflections with d >= dmin
---|
1356 | NB: GenHKLf & checkMagextc return True for extinct reflections |
---|
1357 | |
---|
1358 | :param dmin: minimum d-spacing |
---|
1359 | :param SGData: space group data obtained from SpcGroup |
---|
1360 | :param A: lattice parameter terms A1-A6 |
---|
1361 | :param Inst: instrument parameter info
---|
:param nodup: if True, skip reflections whose d-spacing duplicates that of an earlier reflection (matched to ~1e-4)
---|
1362 | :returns: HKLs: np.array hkl, etc for allowed reflections
---|
1363 | |
---|
1364 | ''' |
---|
1365 | HKL = G2lat.GenHLaue(dmin,SGData,A) |
---|
1366 | HKLs = [] |
---|
1367 | ds = [] |
---|
1368 | for h,k,l,d in HKL: |
---|
1369 | ext = G2spc.GenHKLf([h,k,l],SGData)[0] |
---|
1370 | if ext and 'MagSpGrp' in SGData: |
---|
1371 | ext = G2spc.checkMagextc([h,k,l],SGData) |
---|
1372 | if not ext: |
---|
1373 | if nodup and int(10000*d) in ds: |
---|
1374 | continue |
---|
1375 | ds.append(int(10000*d)) |
---|
1376 | if Inst == None: |
---|
1377 | HKLs.append([h,k,l,d,0,-1]) |
---|
1378 | else: |
---|
1379 | HKLs.append([h,k,l,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
1380 | return np.array(HKLs) |
---|
1381 | |
---|
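# With nodup=True, getHKLpeak suppresses duplicates by keying each reflection on
# int(10000*d), i.e. reflections whose d-spacings agree to roughly 1e-4 are
# treated as one. A stand-alone sketch of that bookkeeping (helper name and
# values are illustrative only):
def _demo_dedup_by_d(dspacings):
    seen,unique = [],[]
    for d in dspacings:
        key = int(10000*d)       # same rounding key as used in getHKLpeak
        if key in seen:
            continue
        seen.append(key)
        unique.append(d)
    return unique
# e.g. _demo_dedup_by_d([2.00001,2.00003,1.5]) -> [2.00001, 1.5]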
1382 | def getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A): |
---|
1383 | '''Generates the symmetry-allowed main reflections plus their satellites (orders -maxH to +maxH along the modulation vector Vec) with d >= dmin; returns the list sorted on d-spacing.'''
---|
1384 | HKLs = [] |
---|
1385 | vec = np.array(Vec) |
---|
1386 | vstar = np.sqrt(G2lat.calc_rDsq(vec,A)) #find extra needed for -n SS reflections |
---|
1387 | dvec = 1./(maxH*vstar+1./dmin) |
---|
1388 | HKL = G2lat.GenHLaue(dvec,SGData,A) |
---|
1389 | SSdH = [vec*h for h in range(-maxH,maxH+1)] |
---|
1390 | SSdH = dict(zip(range(-maxH,maxH+1),SSdH)) |
---|
1391 | ifMag = False |
---|
1392 | if 'MagSpGrp' in SGData: |
---|
1393 | ifMag = True |
---|
1394 | for h,k,l,d in HKL: |
---|
1395 | ext = G2spc.GenHKLf([h,k,l],SGData)[0] |
---|
1396 | if not ext and d >= dmin: |
---|
1397 | HKLs.append([h,k,l,0,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
1398 | for dH in SSdH: |
---|
1399 | if dH: |
---|
1400 | DH = SSdH[dH] |
---|
1401 | H = [h+DH[0],k+DH[1],l+DH[2]] |
---|
1402 | d = float(1/np.sqrt(G2lat.calc_rDsq(H,A))) |
---|
1403 | if d >= dmin: |
---|
1404 | HKLM = np.array([h,k,l,dH]) |
---|
1405 | if G2spc.checkSSextc(HKLM,SSGData) or ifMag: |
---|
1406 | HKLs.append([h,k,l,dH,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
1407 | return G2lat.sortHKLd(HKLs,True,True,True) |
---|
1408 | |
---|
1409 | peakInstPrmMode = True |
---|
1410 | '''Determines the mode used for peak fitting. When peakInstPrmMode=True peak |
---|
1411 | width parameters are computed from the instrument parameters (UVW,... or |
---|
1412 | alpha,... etc) unless the individual parameter is refined. This allows the |
---|
1413 | instrument parameters to be refined. When peakInstPrmMode=False, the instrument |
---|
1414 | parameters are not used and cannot be refined. |
---|
1415 | The default is peakInstPrmMode=True. This is changed only in
---|
1416 | :func:`setPeakInstPrmMode`, which is called from :mod:`GSASIIscriptable` |
---|
1417 | or GSASIIphsGUI.OnSetPeakWidMode ('Gen unvaried widths' menu item). |
---|
1418 | ''' |
---|
1419 | |
---|
1420 | def setPeakInstPrmMode(normal=True): |
---|
1421 | '''Determines the mode used for peak fitting. If normal=True (default) |
---|
1422 | peak width parameters are computed from the instrument parameters |
---|
1423 | unless the individual parameter is refined. If normal=False, |
---|
1424 | peak widths are used as supplied for each peak. |
---|
1425 | |
---|
1426 | Note that normal=True unless this routine is called. Also, |
---|
1427 | instrument parameters can only be refined with normal=True. |
---|
1428 | |
---|
1429 | :param bool normal: setting to apply to global variable |
---|
1430 | :data:`peakInstPrmMode` |
---|
1431 | ''' |
---|
1432 | global peakInstPrmMode |
---|
1433 | peakInstPrmMode = normal |
---|
1434 | |
---|
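# A minimal usage sketch (hypothetical helper, not called anywhere in this module)
# of how a script might switch to fixed per-peak widths around a fit and then
# restore the default behavior; the try/finally ensures the module-level flag is
# reset even if the fit raises.
def _demo_fixed_width_fit():
    setPeakInstPrmMode(False)        # use the sig/gam values supplied per peak
    try:
        pass                         # ... call DoPeakFit(...) here ...
    finally:
        setPeakInstPrmMode(True)     # restore width generation from UVW/XYZ etc.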
1435 | def getPeakProfile(dataType,parmDict,xdata,fixback,varyList,bakType): |
---|
1436 | '''Computes the profiles from multiple peaks for individual peak fitting |
---|
1437 | for powder patterns. |
---|
1438 | NB: not used for Rietveld refinement |
---|
1439 | ''' |
---|
1440 | |
---|
1441 | yb = getBackground('',parmDict,bakType,dataType,xdata,fixback)[0] |
---|
1442 | yc = np.zeros_like(yb) |
---|
1443 | if 'LF' in dataType: |
---|
1444 | if 'Lam1' in parmDict.keys(): |
---|
1445 | lam = parmDict['Lam1'] |
---|
1446 | lam2 = parmDict['Lam2'] |
---|
1447 | Ka2 = True |
---|
1448 | lamRatio = 360*(lam2-lam)/(np.pi*lam) |
---|
1449 | kRatio = parmDict['I(L2)/I(L1)'] |
---|
1450 | else: |
---|
1451 | lam = parmDict['Lam'] |
---|
1452 | Ka2 = False |
---|
1453 | shol = 0 |
---|
1454 | # loop over peaks |
---|
1455 | iPeak = -1 |
---|
1456 | try: |
---|
1457 | ncells = parmDict['ncell'] |
---|
1458 | clat = parmDict['clat'] |
---|
1459 | except KeyError: # no Laue info; must be a background-only fit
---|
1460 | print('Laue Fit: no params, assuming bkg fit') |
---|
1461 | return yb |
---|
1462 | while True: |
---|
1463 | iPeak += 1 |
---|
1464 | try: |
---|
1465 | #Qcen = 2 * np.pi * lam * (iPeak+1) / parmDict['clat'] |
---|
1466 | l = parmDict['l'+str(iPeak)] |
---|
1467 | pos = 360 * np.arcsin(0.5 * lam * l / parmDict['clat']) / np.pi |
---|
1468 | parmDict['pos'+str(iPeak)] = pos |
---|
1469 | #tth = (pos-parmDict['Zero']) |
---|
1470 | intens = parmDict['int'+str(iPeak)] |
---|
1471 | damp = parmDict['damp'+str(iPeak)] |
---|
1472 | asym = parmDict['asym'+str(iPeak)] |
---|
1473 | sig = parmDict['sig'+str(iPeak)] |
---|
1474 | gam = parmDict['gam'+str(iPeak)] |
---|
1475 | fmin = 8 # for now make peaks 8 degrees wide |
---|
1476 | fmin = min(0.9*abs(xdata[-1] - xdata[0]),fmin) # unless the data range is smaller |
---|
1477 | iBeg = np.searchsorted(xdata,pos-fmin/2) |
---|
1478 | iFin = np.searchsorted(xdata,pos+fmin/2) |
---|
1479 | if not iBeg+iFin: # skip peak below low limit |
---|
1480 | continue |
---|
1481 | elif not iBeg-iFin: # got peak above high limit (peaks sorted, so we can stop) |
---|
1482 | break |
---|
1483 | #LF.plotme(fmin,lam,pos,intens,sig,gam,shol,ncells,clat,damp,asym) |
---|
1484 | #LaueFringePeakCalc(xdata,yc,lam,pos,intens,sig,gam,shol,ncells,clat,damp,asym,fmin,plot=(iPeak==0)) |
---|
1485 | LaueFringePeakCalc(xdata,yc,lam,pos,intens,sig,gam,shol,ncells,clat,damp,asym,fmin,plot=False) |
---|
1486 | if Ka2: |
---|
1487 | pos2 = pos+lamRatio*tand(pos/2.0) # + 360/pi * Dlam/lam * tan(th) |
---|
1488 | iBeg = np.searchsorted(xdata,pos2-fmin) |
---|
1489 | iFin = np.searchsorted(xdata,pos2+fmin) |
---|
1490 | if iBeg-iFin: |
---|
1491 | LaueFringePeakCalc(xdata,yc,lam2,pos2,intens*kRatio,sig,gam,shol,ncells,clat,damp,asym,fmin) |
---|
1492 | except KeyError: #no more peaks to process |
---|
1493 | return yb+yc |
---|
1494 | elif 'C' in dataType: |
---|
1495 | shl = max(parmDict['SH/L'],0.002) |
---|
1496 | Ka2 = False |
---|
1497 | if 'Lam1' in parmDict.keys(): |
---|
1498 | Ka2 = True |
---|
1499 | lamRatio = 360*(parmDict['Lam2']-parmDict['Lam1'])/(np.pi*parmDict['Lam1']) |
---|
1500 | kRatio = parmDict['I(L2)/I(L1)'] |
---|
1501 | iPeak = 0 |
---|
1502 | while True: |
---|
1503 | try: |
---|
1504 | pos = parmDict['pos'+str(iPeak)] |
---|
1505 | tth = (pos-parmDict['Zero']) |
---|
1506 | intens = parmDict['int'+str(iPeak)] |
---|
1507 | sigName = 'sig'+str(iPeak) |
---|
1508 | if sigName in varyList or not peakInstPrmMode: |
---|
1509 | sig = parmDict[sigName] |
---|
1510 | else: |
---|
1511 | sig = G2mth.getCWsig(parmDict,tth) |
---|
1512 | sig = max(sig,0.001) #avoid neg sigma^2 |
---|
1513 | gamName = 'gam'+str(iPeak) |
---|
1514 | if gamName in varyList or not peakInstPrmMode: |
---|
1515 | gam = parmDict[gamName] |
---|
1516 | else: |
---|
1517 | gam = G2mth.getCWgam(parmDict,tth) |
---|
1518 | gam = max(gam,0.001) #avoid neg gamma |
---|
1519 | Wd,fmin,fmax = getWidthsCW(pos,sig,gam,shl) |
---|
1520 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1521 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
1522 | if not iBeg+iFin: #peak below low limit |
---|
1523 | iPeak += 1 |
---|
1524 | continue |
---|
1525 | elif not iBeg-iFin: #peak above high limit |
---|
1526 | return yb+yc |
---|
1527 | fp = getFCJVoigt3(pos,sig,gam,shl,xdata[iBeg:iFin])[0] |
---|
1528 | yc[iBeg:iFin] += intens*fp |
---|
1529 | if Ka2: |
---|
1530 | pos2 = pos+lamRatio*tand(pos/2.0) # + 360/pi * Dlam/lam * tan(th) |
---|
1531 | iBeg = np.searchsorted(xdata,pos2-fmin) |
---|
1532 | iFin = np.searchsorted(xdata,pos2+fmin) |
---|
1533 | if iBeg-iFin: |
---|
1534 | fp2 = getFCJVoigt3(pos2,sig,gam,shl,xdata[iBeg:iFin])[0] |
---|
1535 | yc[iBeg:iFin] += intens*kRatio*fp2 |
---|
1536 | iPeak += 1 |
---|
1537 | except KeyError: #no more peaks to process |
---|
1538 | return yb+yc |
---|
1539 | elif 'E' in dataType: |
---|
1540 | iPeak = 0 |
---|
1541 | dsp = 1.0 #for now - fix later |
---|
1542 | while True: |
---|
1543 | try: |
---|
1544 | pos = parmDict['pos'+str(iPeak)] |
---|
1545 | intens = parmDict['int'+str(iPeak)] |
---|
1546 | sigName = 'sig'+str(iPeak) |
---|
1547 | if sigName in varyList or not peakInstPrmMode: |
---|
1548 | sig = parmDict[sigName] |
---|
1549 | else: |
---|
1550 | sig = G2mth.getEDsig(parmDict,pos) |
---|
1551 | sig = max(sig,0.001) #avoid neg sigma^2 |
---|
1552 | Wd,fmin,fmax = getWidthsED(pos,sig) |
---|
1553 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1554 | iFin = max(iBeg+3,np.searchsorted(xdata,pos+fmin)) |
---|
1555 | if not iBeg+iFin: #peak below low limit |
---|
1556 | iPeak += 1 |
---|
1557 | continue |
---|
1558 | elif not iBeg-iFin: #peak above high limit |
---|
1559 | return yb+yc |
---|
1560 | yc[iBeg:iFin] += intens*getPsVoigt(pos,sig*10.**4,0.001,xdata[iBeg:iFin])[0] |
---|
1561 | iPeak += 1 |
---|
1562 | except KeyError: #no more peaks to process |
---|
1563 | return yb+yc |
---|
1564 | elif 'B' in dataType: |
---|
1565 | iPeak = 0 |
---|
1566 | dsp = 1.0 #for now - fix later |
---|
1567 | while True: |
---|
1568 | try: |
---|
1569 | pos = parmDict['pos'+str(iPeak)] |
---|
1570 | tth = (pos-parmDict['Zero']) |
---|
1571 | intens = parmDict['int'+str(iPeak)] |
---|
1572 | alpName = 'alp'+str(iPeak) |
---|
1573 | if alpName in varyList or not peakInstPrmMode: |
---|
1574 | alp = parmDict[alpName] |
---|
1575 | else: |
---|
1576 | alp = G2mth.getPinkalpha(parmDict,tth) |
---|
1577 | alp = max(0.1,alp) |
---|
1578 | betName = 'bet'+str(iPeak) |
---|
1579 | if betName in varyList or not peakInstPrmMode: |
---|
1580 | bet = parmDict[betName] |
---|
1581 | else: |
---|
1582 | bet = G2mth.getPinkbeta(parmDict,tth) |
---|
1583 | bet = max(0.1,bet) |
---|
1584 | sigName = 'sig'+str(iPeak) |
---|
1585 | if sigName in varyList or not peakInstPrmMode: |
---|
1586 | sig = parmDict[sigName] |
---|
1587 | else: |
---|
1588 | sig = G2mth.getCWsig(parmDict,tth) |
---|
1589 | sig = max(sig,0.001) #avoid neg sigma^2 |
---|
1590 | gamName = 'gam'+str(iPeak) |
---|
1591 | if gamName in varyList or not peakInstPrmMode: |
---|
1592 | gam = parmDict[gamName] |
---|
1593 | else: |
---|
1594 | gam = G2mth.getCWgam(parmDict,tth) |
---|
1595 | gam = max(gam,0.001) #avoid neg gamma |
---|
1596 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig,gam) |
---|
1597 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1598 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
1599 | if not iBeg+iFin: #peak below low limit |
---|
1600 | iPeak += 1 |
---|
1601 | continue |
---|
1602 | elif not iBeg-iFin: #peak above high limit |
---|
1603 | return yb+yc |
---|
1604 | yc[iBeg:iFin] += intens*getEpsVoigt(pos,alp,bet,sig/1.e4,gam/100.,xdata[iBeg:iFin])[0] |
---|
1605 | iPeak += 1 |
---|
1606 | except KeyError: #no more peaks to process |
---|
1607 | return yb+yc |
---|
1608 | else: |
---|
1609 | Pdabc = parmDict['Pdabc'] |
---|
1610 | difC = parmDict['difC'] |
---|
1611 | iPeak = 0 |
---|
1612 | while True: |
---|
1613 | try: |
---|
1614 | pos = parmDict['pos'+str(iPeak)] |
---|
1615 | tof = pos-parmDict['Zero'] |
---|
1616 | dsp = tof/difC |
---|
1617 | intens = parmDict['int'+str(iPeak)] |
---|
1618 | alpName = 'alp'+str(iPeak) |
---|
1619 | if alpName in varyList or not peakInstPrmMode: |
---|
1620 | alp = parmDict[alpName] |
---|
1621 | else: |
---|
1622 | if len(Pdabc): |
---|
1623 | alp = np.interp(dsp,Pdabc[0],Pdabc[1]) |
---|
1624 | else: |
---|
1625 | alp = G2mth.getTOFalpha(parmDict,dsp) |
---|
1626 | alp = max(0.1,alp) |
---|
1627 | betName = 'bet'+str(iPeak) |
---|
1628 | if betName in varyList or not peakInstPrmMode: |
---|
1629 | bet = parmDict[betName] |
---|
1630 | else: |
---|
1631 | if len(Pdabc): |
---|
1632 | bet = np.interp(dsp,Pdabc[0],Pdabc[2]) |
---|
1633 | else: |
---|
1634 | bet = G2mth.getTOFbeta(parmDict,dsp) |
---|
1635 | bet = max(0.0001,bet) |
---|
1636 | sigName = 'sig'+str(iPeak) |
---|
1637 | if sigName in varyList or not peakInstPrmMode: |
---|
1638 | sig = parmDict[sigName] |
---|
1639 | else: |
---|
1640 | sig = G2mth.getTOFsig(parmDict,dsp) |
---|
1641 | gamName = 'gam'+str(iPeak) |
---|
1642 | if gamName in varyList or not peakInstPrmMode: |
---|
1643 | gam = parmDict[gamName] |
---|
1644 | else: |
---|
1645 | gam = G2mth.getTOFgamma(parmDict,dsp) |
---|
1646 | gam = max(gam,0.001) #avoid neg gamma |
---|
1647 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig,gam) |
---|
1648 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1649 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1650 | lenX = len(xdata) |
---|
1651 | if not iBeg: |
---|
1652 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1653 | elif iBeg == lenX: |
---|
1654 | iFin = iBeg |
---|
1655 | else: |
---|
1656 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1657 | if not iBeg+iFin: #peak below low limit |
---|
1658 | iPeak += 1 |
---|
1659 | continue |
---|
1660 | elif not iBeg-iFin: #peak above high limit |
---|
1661 | return yb+yc |
---|
1662 | yc[iBeg:iFin] += intens*getEpsVoigt(pos,alp,bet,sig,gam,xdata[iBeg:iFin])[0] |
---|
1663 | iPeak += 1 |
---|
1664 | except KeyError: #no more peaks to process |
---|
1665 | return yb+yc |
---|
1666 | |
---|
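# Every branch of getPeakProfile selects the slice of the pattern covered by one
# peak with the same searchsorted tests. A stand-alone sketch of that window
# logic (helper name is illustrative only):
def _demo_peak_window(xdata,pos,fmin,fmax):
    iBeg = np.searchsorted(xdata,pos-fmin)
    iFin = np.searchsorted(xdata,pos+fmax)
    if not iBeg+iFin:        # both zero: peak lies entirely below the data range
        return None          # caller continues with the next peak
    elif not iBeg-iFin:      # iBeg == iFin: peak lies at/above the end of the data
        return None          # caller stops looping over the (sorted) peaks
    return iBeg,iFin         # xdata[iBeg:iFin] receives this peak's intensity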
1667 | def getPeakProfileDerv(dataType,parmDict,xdata,fixback,varyList,bakType): |
---|
1668 | '''Computes the profile derivatives for a powder pattern for single peak fitting |
---|
1669 | |
---|
1670 | :returns: np.array([dMdx1,dMdx2,...]) in the same order as varyList (= backVary + insVary + peakVary)
---|
1671 | |
---|
1672 | NB: not used for Rietveld refinement |
---|
1673 | ''' |
---|
1674 | dMdv = np.zeros(shape=(len(varyList),len(xdata))) |
---|
1675 | dMdb,dMddb,dMdpk,dMdfb = getBackgroundDerv('',parmDict,bakType,dataType,xdata,fixback) |
---|
1676 | if 'Back;0' in varyList: #background derivs are in front if present |
---|
1677 | dMdv[0:len(dMdb)] = dMdb |
---|
1678 | names = ['DebyeA','DebyeR','DebyeU'] |
---|
1679 | for name in varyList: |
---|
1680 | if 'Debye' in name: |
---|
1681 | parm,Id = name.split(';') |
---|
1682 | ip = names.index(parm) |
---|
1683 | dMdv[varyList.index(name)] = dMddb[3*int(Id)+ip] |
---|
1684 | names = ['BkPkpos','BkPkint','BkPksig','BkPkgam'] |
---|
1685 | for name in varyList: |
---|
1686 | if 'BkPk' in name: |
---|
1687 | parm,Id = name.split(';') |
---|
1688 | ip = names.index(parm) |
---|
1689 | dMdv[varyList.index(name)] = dMdpk[4*int(Id)+ip] |
---|
1690 | if 'LF' in dataType: |
---|
1691 | for i,name in enumerate(varyList): |
---|
1692 | if not np.all(dMdv[i] == 0): continue |
---|
1693 | deltaParmDict = parmDict.copy() |
---|
1694 | delta = max(parmDict[name]/1e5,0.001) |
---|
1695 | deltaParmDict[name] += delta |
---|
1696 | #print('num. deriv for',name,'val',deltaParmDict[name],'delta',delta) |
---|
1697 | intArrP = getPeakProfile(dataType,deltaParmDict,xdata,fixback,varyList,bakType) |
---|
1698 | deltaParmDict[name] -= 2*delta |
---|
1699 | intArrM = getPeakProfile(dataType,deltaParmDict,xdata,fixback,varyList,bakType) |
---|
1700 | dMdv[i] = 0.5 * (intArrP - intArrM) / delta |
---|
1701 | return dMdv |
---|
1702 | if 'C' in dataType: |
---|
1703 | shl = max(parmDict['SH/L'],0.002) |
---|
1704 | Ka2 = False |
---|
1705 | if 'Lam1' in parmDict.keys(): |
---|
1706 | Ka2 = True |
---|
1707 | lamRatio = 360*(parmDict['Lam2']-parmDict['Lam1'])/(np.pi*parmDict['Lam1']) |
---|
1708 | kRatio = parmDict['I(L2)/I(L1)'] |
---|
1709 | iPeak = 0 |
---|
1710 | while True: |
---|
1711 | try: |
---|
1712 | pos = parmDict['pos'+str(iPeak)] |
---|
1713 | tth = (pos-parmDict['Zero']) |
---|
1714 | intens = parmDict['int'+str(iPeak)] |
---|
1715 | sigName = 'sig'+str(iPeak) |
---|
1716 | if sigName in varyList or not peakInstPrmMode: |
---|
1717 | sig = parmDict[sigName] |
---|
1718 | dsdU = dsdV = dsdW = 0 |
---|
1719 | else: |
---|
1720 | sig = G2mth.getCWsig(parmDict,tth) |
---|
1721 | dsdU,dsdV,dsdW = G2mth.getCWsigDeriv(tth) |
---|
1722 | sig = max(sig,0.001) #avoid neg sigma |
---|
1723 | gamName = 'gam'+str(iPeak) |
---|
1724 | if gamName in varyList or not peakInstPrmMode: |
---|
1725 | gam = parmDict[gamName] |
---|
1726 | dgdX = dgdY = dgdZ = 0 |
---|
1727 | else: |
---|
1728 | gam = G2mth.getCWgam(parmDict,tth) |
---|
1729 | dgdX,dgdY,dgdZ = G2mth.getCWgamDeriv(tth) |
---|
1730 | gam = max(gam,0.001) #avoid neg gamma |
---|
1731 | Wd,fmin,fmax = getWidthsCW(pos,sig,gam,shl) |
---|
1732 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1733 | iFin = max(iBeg+3,np.searchsorted(xdata,pos+fmin)) |
---|
1734 | if not iBeg+iFin: #peak below low limit |
---|
1735 | iPeak += 1 |
---|
1736 | continue |
---|
1737 | elif not iBeg-iFin: #peak above high limit |
---|
1738 | break |
---|
1739 | dMdpk = np.zeros(shape=(6,len(xdata))) |
---|
1740 | dMdipk = getdFCJVoigt3(pos,sig,gam,shl,xdata[iBeg:iFin]) |
---|
1741 | for i in range(1,5): |
---|
1742 | dMdpk[i][iBeg:iFin] += intens*dMdipk[i] |
---|
1743 | dMdpk[0][iBeg:iFin] += dMdipk[0] |
---|
1744 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4]} |
---|
1745 | if Ka2: |
---|
1746 | pos2 = pos+lamRatio*tand(pos/2.0) # + 360/pi * Dlam/lam * tan(th) |
---|
1747 | iBeg = np.searchsorted(xdata,pos2-fmin) |
---|
1748 | iFin = np.searchsorted(xdata,pos2+fmin) |
---|
1749 | if iBeg-iFin: |
---|
1750 | dMdipk2 = getdFCJVoigt3(pos2,sig,gam,shl,xdata[iBeg:iFin]) |
---|
1751 | for i in range(1,5): |
---|
1752 | dMdpk[i][iBeg:iFin] += intens*kRatio*dMdipk2[i] |
---|
1753 | dMdpk[0][iBeg:iFin] += kRatio*dMdipk2[0] |
---|
1754 | dMdpk[5][iBeg:iFin] += dMdipk2[0] |
---|
1755 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4],'L1/L2':dMdpk[5]*intens} |
---|
1756 | for parmName in ['pos','int','sig','gam']: |
---|
1757 | try: |
---|
1758 | idx = varyList.index(parmName+str(iPeak)) |
---|
1759 | dMdv[idx] = dervDict[parmName] |
---|
1760 | except ValueError: |
---|
1761 | pass |
---|
1762 | if 'U' in varyList: |
---|
1763 | dMdv[varyList.index('U')] += dsdU*dervDict['sig'] |
---|
1764 | if 'V' in varyList: |
---|
1765 | dMdv[varyList.index('V')] += dsdV*dervDict['sig'] |
---|
1766 | if 'W' in varyList: |
---|
1767 | dMdv[varyList.index('W')] += dsdW*dervDict['sig'] |
---|
1768 | if 'X' in varyList: |
---|
1769 | dMdv[varyList.index('X')] += dgdX*dervDict['gam'] |
---|
1770 | if 'Y' in varyList: |
---|
1771 | dMdv[varyList.index('Y')] += dgdY*dervDict['gam'] |
---|
1772 | if 'Z' in varyList: |
---|
1773 | dMdv[varyList.index('Z')] += dgdZ*dervDict['gam'] |
---|
1774 | if 'SH/L' in varyList: |
---|
1775 | dMdv[varyList.index('SH/L')] += dervDict['shl'] #problem here |
---|
1776 | if 'I(L2)/I(L1)' in varyList: |
---|
1777 | dMdv[varyList.index('I(L2)/I(L1)')] += dervDict['L1/L2'] |
---|
1778 | iPeak += 1 |
---|
1779 | except KeyError: #no more peaks to process |
---|
1780 | break |
---|
1781 | elif 'E' in dataType: |
---|
1782 | iPeak = 0 |
---|
1783 | while True: |
---|
1784 | try: |
---|
1785 | pos = parmDict['pos'+str(iPeak)] |
---|
1786 | intens = parmDict['int'+str(iPeak)] |
---|
1787 | sigName = 'sig'+str(iPeak) |
---|
1788 | if sigName in varyList or not peakInstPrmMode: |
---|
1789 | sig = parmDict[sigName] |
---|
1790 | dsdA = dsdB = dsdC = 0 |
---|
1791 | else: |
---|
1792 | sig = G2mth.getEDsig(parmDict,pos) |
---|
1793 | dsdA,dsdB,dsdC = G2mth.getEDsigDeriv(parmDict,pos) |
---|
1794 | sig = max(sig,0.001) #avoid neg sigma |
---|
1795 | Wd,fmin,fmax = getWidthsED(pos,sig) |
---|
1796 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1797 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
1798 | if not iBeg+iFin: #peak below low limit |
---|
1799 | iPeak += 1 |
---|
1800 | continue |
---|
1801 | elif not iBeg-iFin: #peak above high limit |
---|
1802 | break |
---|
1803 | dMdpk = np.zeros(shape=(4,len(xdata))) |
---|
1804 | dMdipk = getdPsVoigt(pos,sig*10.**4,0.001,xdata[iBeg:iFin]) |
---|
1805 | dMdpk[0][iBeg:iFin] += dMdipk[0] |
---|
1806 | for i in range(1,4): |
---|
1807 | dMdpk[i][iBeg:iFin] += intens*dMdipk[i] |
---|
1808 | dervDict = {'int':dMdpk[0],'pos':-dMdpk[1],'sig':dMdpk[2]*10**4} |
---|
1809 | for parmName in ['pos','int','sig']: |
---|
1810 | try: |
---|
1811 | idx = varyList.index(parmName+str(iPeak)) |
---|
1812 | dMdv[idx] = dervDict[parmName] |
---|
1813 | except ValueError: |
---|
1814 | pass |
---|
1815 | if 'A' in varyList: |
---|
1816 | dMdv[varyList.index('A')] += dsdA*dervDict['sig'] |
---|
1817 | if 'B' in varyList: |
---|
1818 | dMdv[varyList.index('B')] += dsdB*dervDict['sig'] |
---|
1819 | if 'C' in varyList: |
---|
1820 | dMdv[varyList.index('C')] += dsdC*dervDict['sig'] |
---|
1821 | iPeak += 1 |
---|
1822 | except KeyError: #no more peaks to process |
---|
1823 | break |
---|
1824 | |
---|
1825 | elif 'B' in dataType: |
---|
1826 | iPeak = 0 |
---|
1827 | while True: |
---|
1828 | try: |
---|
1829 | pos = parmDict['pos'+str(iPeak)] |
---|
1830 | tth = (pos-parmDict['Zero']) |
---|
1831 | intens = parmDict['int'+str(iPeak)] |
---|
1832 | alpName = 'alp'+str(iPeak) |
---|
1833 | if alpName in varyList or not peakInstPrmMode: |
---|
1834 | alp = parmDict[alpName] |
---|
1835 | dada0 = dada1 = 0.0 |
---|
1836 | else: |
---|
1837 | alp = G2mth.getPinkalpha(parmDict,tth) |
---|
1838 | dada0,dada1 = G2mth.getPinkalphaDeriv(tth) |
---|
1839 | alp = max(0.0001,alp) |
---|
1840 | betName = 'bet'+str(iPeak) |
---|
1841 | if betName in varyList or not peakInstPrmMode: |
---|
1842 | bet = parmDict[betName] |
---|
1843 | dbdb0 = dbdb1 = 0.0 |
---|
1844 | else: |
---|
1845 | bet = G2mth.getPinkbeta(parmDict,tth) |
---|
1846 | dbdb0,dbdb1 = G2mth.getPinkbetaDeriv(tth) |
---|
1847 | bet = max(0.0001,bet) |
---|
1848 | sigName = 'sig'+str(iPeak) |
---|
1849 | if sigName in varyList or not peakInstPrmMode: |
---|
1850 | sig = parmDict[sigName] |
---|
1851 | dsdU = dsdV = dsdW = 0 |
---|
1852 | else: |
---|
1853 | sig = G2mth.getCWsig(parmDict,tth) |
---|
1854 | dsdU,dsdV,dsdW = G2mth.getCWsigDeriv(tth) |
---|
1855 | sig = max(sig,0.001) #avoid neg sigma |
---|
1856 | gamName = 'gam'+str(iPeak) |
---|
1857 | if gamName in varyList or not peakInstPrmMode: |
---|
1858 | gam = parmDict[gamName] |
---|
1859 | dgdX = dgdY = dgdZ = 0 |
---|
1860 | else: |
---|
1861 | gam = G2mth.getCWgam(parmDict,tth) |
---|
1862 | dgdX,dgdY,dgdZ = G2mth.getCWgamDeriv(tth) |
---|
1863 | gam = max(gam,0.001) #avoid neg gamma |
---|
1864 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig/1.e4,gam/100.) |
---|
1865 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1866 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
1867 | if not iBeg+iFin: #peak below low limit |
---|
1868 | iPeak += 1 |
---|
1869 | continue |
---|
1870 | elif not iBeg-iFin: #peak above high limit |
---|
1871 | break |
---|
1872 | dMdpk = np.zeros(shape=(7,len(xdata))) |
---|
1873 | dMdipk = getdEpsVoigt(pos,alp,bet,sig/1.e4,gam/100.,xdata[iBeg:iFin]) |
---|
1874 | for i in range(1,6): |
---|
1875 | dMdpk[i][iBeg:iFin] += intens*dMdipk[i] |
---|
1876 | dMdpk[0][iBeg:iFin] += dMdipk[0] |
---|
1877 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'alp':dMdpk[2],'bet':dMdpk[3],'sig':dMdpk[4]/1.e4,'gam':dMdpk[5]/100.} |
---|
1878 | for parmName in ['pos','int','alp','bet','sig','gam']: |
---|
1879 | try: |
---|
1880 | idx = varyList.index(parmName+str(iPeak)) |
---|
1881 | dMdv[idx] = dervDict[parmName] |
---|
1882 | except ValueError: |
---|
1883 | pass |
---|
1884 | if 'U' in varyList: |
---|
1885 | dMdv[varyList.index('U')] += dsdU*dervDict['sig'] |
---|
1886 | if 'V' in varyList: |
---|
1887 | dMdv[varyList.index('V')] += dsdV*dervDict['sig'] |
---|
1888 | if 'W' in varyList: |
---|
1889 | dMdv[varyList.index('W')] += dsdW*dervDict['sig'] |
---|
1890 | if 'X' in varyList: |
---|
1891 | dMdv[varyList.index('X')] += dgdX*dervDict['gam'] |
---|
1892 | if 'Y' in varyList: |
---|
1893 | dMdv[varyList.index('Y')] += dgdY*dervDict['gam'] |
---|
1894 | if 'Z' in varyList: |
---|
1895 | dMdv[varyList.index('Z')] += dgdZ*dervDict['gam'] |
---|
1896 | if 'alpha-0' in varyList: |
---|
1897 | dMdv[varyList.index('alpha-0')] += dada0*dervDict['alp'] |
---|
1898 | if 'alpha-1' in varyList: |
---|
1899 | dMdv[varyList.index('alpha-1')] += dada1*dervDict['alp'] |
---|
1900 | if 'beta-0' in varyList: |
---|
1901 | dMdv[varyList.index('beta-0')] += dbdb0*dervDict['bet'] |
---|
1902 | if 'beta-1' in varyList: |
---|
1903 | dMdv[varyList.index('beta-1')] += dbdb1*dervDict['bet'] |
---|
1904 | iPeak += 1 |
---|
1905 | except KeyError: #no more peaks to process |
---|
1906 | break |
---|
1907 | else: |
---|
1908 | Pdabc = parmDict['Pdabc'] |
---|
1909 | difC = parmDict['difC'] |
---|
1910 | iPeak = 0 |
---|
1911 | while True: |
---|
1912 | try: |
---|
1913 | pos = parmDict['pos'+str(iPeak)] |
---|
1914 | tof = pos-parmDict['Zero'] |
---|
1915 | dsp = tof/difC |
---|
1916 | intens = parmDict['int'+str(iPeak)] |
---|
1917 | alpName = 'alp'+str(iPeak) |
---|
1918 | if alpName in varyList or not peakInstPrmMode: |
---|
1919 | alp = parmDict[alpName] |
---|
1920 | else: |
---|
1921 | if len(Pdabc): |
---|
1922 | alp = np.interp(dsp,Pdabc[0],Pdabc[1]) |
---|
1923 | dada0 = 0 |
---|
1924 | else: |
---|
1925 | alp = G2mth.getTOFalpha(parmDict,dsp) |
---|
1926 | dada0 = G2mth.getTOFalphaDeriv(dsp) |
---|
1927 | betName = 'bet'+str(iPeak) |
---|
1928 | if betName in varyList or not peakInstPrmMode: |
---|
1929 | bet = parmDict[betName] |
---|
1930 | else: |
---|
1931 | if len(Pdabc): |
---|
1932 | bet = np.interp(dsp,Pdabc[0],Pdabc[2]) |
---|
1933 | dbdb0 = dbdb1 = dbdb2 = 0 |
---|
1934 | else: |
---|
1935 | bet = G2mth.getTOFbeta(parmDict,dsp) |
---|
1936 | dbdb0,dbdb1,dbdb2 = G2mth.getTOFbetaDeriv(dsp) |
---|
1937 | sigName = 'sig'+str(iPeak) |
---|
1938 | if sigName in varyList or not peakInstPrmMode: |
---|
1939 | sig = parmDict[sigName] |
---|
1940 | dsds0 = dsds1 = dsds2 = dsds3 = 0 |
---|
1941 | else: |
---|
1942 | sig = G2mth.getTOFsig(parmDict,dsp) |
---|
1943 | dsds0,dsds1,dsds2,dsds3 = G2mth.getTOFsigDeriv(dsp) |
---|
1944 | gamName = 'gam'+str(iPeak) |
---|
1945 | if gamName in varyList or not peakInstPrmMode: |
---|
1946 | gam = parmDict[gamName] |
---|
1947 | dsdX = dsdY = dsdZ = 0 |
---|
1948 | else: |
---|
1949 | gam = G2mth.getTOFgamma(parmDict,dsp) |
---|
1950 | dsdX,dsdY,dsdZ = G2mth.getTOFgammaDeriv(dsp) |
---|
1951 | gam = max(gam,0.001) #avoid neg gamma |
---|
1952 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig,gam) |
---|
1953 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1954 | lenX = len(xdata) |
---|
1955 | if not iBeg: |
---|
1956 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1957 | elif iBeg == lenX: |
---|
1958 | iFin = iBeg |
---|
1959 | else: |
---|
1960 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1961 | if not iBeg+iFin: #peak below low limit |
---|
1962 | iPeak += 1 |
---|
1963 | continue |
---|
1964 | elif not iBeg-iFin: #peak above high limit |
---|
1965 | break |
---|
1966 | dMdpk = np.zeros(shape=(7,len(xdata))) |
---|
1967 | dMdipk = getdEpsVoigt(pos,alp,bet,sig,gam,xdata[iBeg:iFin]) |
---|
1968 | for i in range(1,6): |
---|
1969 | dMdpk[i][iBeg:iFin] += intens*dMdipk[i] |
---|
1970 | dMdpk[0][iBeg:iFin] += dMdipk[0] |
---|
1971 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'alp':dMdpk[2],'bet':dMdpk[3],'sig':dMdpk[4],'gam':dMdpk[5]} |
---|
1972 | for parmName in ['pos','int','alp','bet','sig','gam']: |
---|
1973 | try: |
---|
1974 | idx = varyList.index(parmName+str(iPeak)) |
---|
1975 | dMdv[idx] = dervDict[parmName] |
---|
1976 | except ValueError: |
---|
1977 | pass |
---|
1978 | if 'alpha' in varyList: |
---|
1979 | dMdv[varyList.index('alpha')] += dada0*dervDict['alp'] |
---|
1980 | if 'beta-0' in varyList: |
---|
1981 | dMdv[varyList.index('beta-0')] += dbdb0*dervDict['bet'] |
---|
1982 | if 'beta-1' in varyList: |
---|
1983 | dMdv[varyList.index('beta-1')] += dbdb1*dervDict['bet'] |
---|
1984 | if 'beta-q' in varyList: |
---|
1985 | dMdv[varyList.index('beta-q')] += dbdb2*dervDict['bet'] |
---|
1986 | if 'sig-0' in varyList: |
---|
1987 | dMdv[varyList.index('sig-0')] += dsds0*dervDict['sig'] |
---|
1988 | if 'sig-1' in varyList: |
---|
1989 | dMdv[varyList.index('sig-1')] += dsds1*dervDict['sig'] |
---|
1990 | if 'sig-2' in varyList: |
---|
1991 | dMdv[varyList.index('sig-2')] += dsds2*dervDict['sig'] |
---|
1992 | if 'sig-q' in varyList: |
---|
1993 | dMdv[varyList.index('sig-q')] += dsds3*dervDict['sig'] |
---|
1994 | if 'X' in varyList: |
---|
1995 | dMdv[varyList.index('X')] += dsdX*dervDict['gam'] |
---|
1996 | if 'Y' in varyList: |
---|
1997 | dMdv[varyList.index('Y')] += dsdY*dervDict['gam'] |
---|
1998 | if 'Z' in varyList: |
---|
1999 | dMdv[varyList.index('Z')] += dsdZ*dervDict['gam'] |
---|
2000 | iPeak += 1 |
---|
2001 | except KeyError: #no more peaks to process |
---|
2002 | break |
---|
2003 | if 'BF mult' in varyList: |
---|
2004 | dMdv[varyList.index('BF mult')] = fixback |
---|
2005 | |
---|
2006 | return dMdv |
---|
2007 | |
---|
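# For Laue Fringe ('LF') histograms getPeakProfileDerv falls back to numerical
# derivatives: a central difference with a relative step, delta =
# max(value/1e5, 0.001). The sketch below restates that scheme for a hypothetical
# callable 'profileFunc' that maps a parameter dict to a calculated profile.
def _demo_numeric_deriv(profileFunc,parmDict,name):
    delta = max(parmDict[name]/1e5,0.001)
    dPlus = dict(parmDict); dPlus[name] += delta
    dMinus = dict(parmDict); dMinus[name] -= delta
    return 0.5*(profileFunc(dPlus)-profileFunc(dMinus))/delta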
2008 | def Dict2Values(parmdict, varylist): |
---|
2009 | '''Use before call to leastsq to setup list of values for the parameters |
---|
2010 | in parmdict, as selected by key in varylist''' |
---|
2011 | return [parmdict[key] for key in varylist] |
---|
2012 | |
---|
2013 | def Values2Dict(parmdict, varylist, values): |
---|
2014 | ''' Use after call to leastsq to update the parameter dictionary with |
---|
2015 | values corresponding to keys in varylist''' |
---|
2016 | parmdict.update(zip(varylist,values)) |
---|
2017 | |
---|
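# Dict2Values/Values2Dict are the bridge between the parameter dictionary and the
# flat vector that so.leastsq optimizes: only keys named in varylist are exposed.
# A minimal round-trip sketch with hypothetical parameter values:
def _demo_parm_roundtrip():
    parmDict = {'pos0':20.0,'int0':100.0,'sig0':4.0,'Zero':0.01}
    varyList = ['pos0','int0']
    values = Dict2Values(parmDict,varyList)    # [20.0, 100.0]
    values[0] += 0.05                          # as if the optimizer moved pos0
    Values2Dict(parmDict,varyList,values)      # writes back only the varied keys
    return parmDict                            # pos0 is now 20.05; Zero unchanged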
2018 | def SetBackgroundParms(Background): |
---|
2019 | 'Loads background parameters into dicts/lists to create varylist & parmdict' |
---|
2020 | if len(Background) == 1: # fix up old backgrounds |
---|
2021 | Background.append({'nDebye':0,'debyeTerms':[]}) |
---|
2022 | bakType,bakFlag = Background[0][:2] |
---|
2023 | backVals = Background[0][3:] |
---|
2024 | backNames = ['Back;'+str(i) for i in range(len(backVals))] |
---|
2025 | Debye = Background[1] #also has background peaks stuff |
---|
2026 | backDict = dict(zip(backNames,backVals)) |
---|
2027 | backVary = [] |
---|
2028 | if bakFlag: |
---|
2029 | backVary = backNames |
---|
2030 | |
---|
2031 | backDict['nDebye'] = Debye['nDebye'] |
---|
2032 | debyeDict = {} |
---|
2033 | debyeList = [] |
---|
2034 | for i in range(Debye['nDebye']): |
---|
2035 | debyeNames = ['DebyeA;'+str(i),'DebyeR;'+str(i),'DebyeU;'+str(i)] |
---|
2036 | debyeDict.update(dict(zip(debyeNames,Debye['debyeTerms'][i][::2]))) |
---|
2037 | debyeList += zip(debyeNames,Debye['debyeTerms'][i][1::2]) |
---|
2038 | debyeVary = [] |
---|
2039 | for item in debyeList: |
---|
2040 | if item[1]: |
---|
2041 | debyeVary.append(item[0]) |
---|
2042 | backDict.update(debyeDict) |
---|
2043 | backVary += debyeVary |
---|
2044 | |
---|
2045 | backDict['nPeaks'] = Debye['nPeaks'] |
---|
2046 | peaksDict = {} |
---|
2047 | peaksList = [] |
---|
2048 | for i in range(Debye['nPeaks']): |
---|
2049 | peaksNames = ['BkPkpos;'+str(i),'BkPkint;'+str(i),'BkPksig;'+str(i),'BkPkgam;'+str(i)] |
---|
2050 | peaksDict.update(dict(zip(peaksNames,Debye['peaksList'][i][::2]))) |
---|
2051 | peaksList += zip(peaksNames,Debye['peaksList'][i][1::2]) |
---|
2052 | peaksVary = [] |
---|
2053 | for item in peaksList: |
---|
2054 | if item[1]: |
---|
2055 | peaksVary.append(item[0]) |
---|
2056 | backDict.update(peaksDict) |
---|
2057 | backVary += peaksVary |
---|
2058 | if 'background PWDR' in Background[1]: |
---|
2059 | backDict['Back File'] = Background[1]['background PWDR'][0] |
---|
2060 | backDict['BF mult'] = Background[1]['background PWDR'][1] |
---|
2061 | if len(Background[1]['background PWDR']) > 2: |
---|
2062 | if Background[1]['background PWDR'][2]: |
---|
2063 | backVary += ['BF mult',] |
---|
2064 | return bakType,backDict,backVary |
---|
2065 | |
---|
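# A minimal sketch of the Background object consumed by SetBackgroundParms:
# item 0 holds the function name, refine flag, number of terms and coefficients;
# item 1 holds the Debye and background-peak terms. The function name and values
# below are hypothetical; only the list/dict layout matters here.
def _demo_background_setup():
    Background = [['chebyschev',True,3,1.0,0.0,0.0],
        {'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[]}]
    bakType,backDict,backVary = SetBackgroundParms(Background)
    # backDict gains Back;0..Back;2 plus nDebye/nPeaks; backVary lists the three
    # Back;n names because the refine flag is True.
    return bakType,backDict,backVary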
2066 | def DoCalibInst(IndexPeaks,Inst): |
---|
2067 | |
---|
2068 | def SetInstParms(): |
---|
2069 | dataType = Inst['Type'][0] |
---|
2070 | insVary = [] |
---|
2071 | insNames = [] |
---|
2072 | insVals = [] |
---|
2073 | for parm in Inst: |
---|
2074 | insNames.append(parm) |
---|
2075 | insVals.append(Inst[parm][1]) |
---|
2076 | if parm in ['Lam','difC','difA','difB','Zero','2-theta','XE','YE','ZE']: |
---|
2077 | if Inst[parm][2]: |
---|
2078 | insVary.append(parm) |
---|
2079 | instDict = dict(zip(insNames,insVals)) |
---|
2080 | return dataType,instDict,insVary |
---|
2081 | |
---|
2082 | def GetInstParms(parmDict,Inst,varyList): |
---|
2083 | for name in Inst: |
---|
2084 | Inst[name][1] = parmDict[name] |
---|
2085 | |
---|
2086 | def InstPrint(Inst,sigDict): |
---|
2087 | print ('Instrument Parameters:') |
---|
2088 | if 'C' in Inst['Type'][0] or 'B' in Inst['Type'][0]: |
---|
2089 | ptfmt = "%12.6f" |
---|
2090 | else: |
---|
2091 | ptfmt = "%12.3f" |
---|
2092 | ptlbls = 'names :' |
---|
2093 | ptstr = 'values:' |
---|
2094 | sigstr = 'esds :' |
---|
2095 | for parm in Inst: |
---|
2096 | if parm in ['Lam','difC','difA','difB','Zero','2-theta','XE','YE','ZE']: |
---|
2097 | ptlbls += "%s" % (parm.center(12)) |
---|
2098 | ptstr += ptfmt % (Inst[parm][1]) |
---|
2099 | if parm in sigDict: |
---|
2100 | sigstr += ptfmt % (sigDict[parm]) |
---|
2101 | else: |
---|
2102 | sigstr += 12*' ' |
---|
2103 | print (ptlbls) |
---|
2104 | print (ptstr) |
---|
2105 | print (sigstr) |
---|
2106 | |
---|
2107 | def errPeakPos(values,peakDsp,peakPos,peakWt,dataType,parmDict,varyList): |
---|
2108 | parmDict.update(zip(varyList,values)) |
---|
2109 | return np.sqrt(peakWt)*(G2lat.getPeakPos(dataType,parmDict,peakDsp)-peakPos) |
---|
2110 | |
---|
2111 | peakPos = [] |
---|
2112 | peakDsp = [] |
---|
2113 | peakWt = [] |
---|
2114 | for peak,sig in zip(IndexPeaks[0],IndexPeaks[1]): |
---|
2115 | if peak[2] and peak[3] and sig > 0.: |
---|
2116 | peakPos.append(peak[0]) |
---|
2117 | peakDsp.append(peak[-1]) #d-calc |
---|
2118 | # peakWt.append(peak[-1]**2/sig**2) #weight by d**2 |
---|
2119 | peakWt.append(1./(sig*peak[-1])) # |
---|
2120 | peakPos = np.array(peakPos) |
---|
2121 | peakDsp = np.array(peakDsp) |
---|
2122 | peakWt = np.array(peakWt) |
---|
2123 | dataType,insDict,insVary = SetInstParms() |
---|
2124 | parmDict = {} |
---|
2125 | parmDict.update(insDict) |
---|
2126 | varyList = insVary |
---|
2127 | if not len(varyList): |
---|
2128 | G2fil.G2Print ('**** ERROR - nothing to refine! ****') |
---|
2129 | return False |
---|
2130 | while True: |
---|
2131 | begin = time.time() |
---|
2132 | values = np.array(Dict2Values(parmDict, varyList)) |
---|
2133 | result = so.leastsq(errPeakPos,values,full_output=True,ftol=0.000001, |
---|
2134 | args=(peakDsp,peakPos,peakWt,dataType,parmDict,varyList)) |
---|
2135 | ncyc = int(result[2]['nfev']/2) |
---|
2136 | runtime = time.time()-begin |
---|
2137 | chisq = np.sum(result[2]['fvec']**2) |
---|
2138 | Values2Dict(parmDict, varyList, result[0]) |
---|
2139 | GOF = chisq/(len(peakPos)-len(varyList)) #reduced chi^2 |
---|
2140 | G2fil.G2Print ('Number of function calls: %d Number of observations: %d Number of parameters: %d'%(result[2]['nfev'],len(peakPos),len(varyList))) |
---|
2141 | G2fil.G2Print ('calib time = %8.3fs, %8.3fs/cycle'%(runtime,runtime/ncyc)) |
---|
2142 | G2fil.G2Print ('chi**2 = %12.6g, reduced chi**2 = %6.2f'%(chisq,GOF)) |
---|
2143 | try: |
---|
2144 | sig = np.sqrt(np.diag(result[1])*GOF) |
---|
2145 | if np.any(np.isnan(sig)): |
---|
2146 | G2fil.G2Print ('*** Least squares aborted - some invalid esds possible ***') |
---|
2147 | break #refinement succeeded - finish up! |
---|
2148 | except ValueError: #result[1] is None on singular matrix |
---|
2149 | G2fil.G2Print ('**** Refinement failed - singular matrix ****') |
---|
2150 | |
---|
2151 | sigDict = dict(zip(varyList,sig)) |
---|
2152 | GetInstParms(parmDict,Inst,varyList) |
---|
2153 | InstPrint(Inst,sigDict) |
---|
2154 | return True |
---|
2155 | |
---|
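# DoCalibInst wraps so.leastsq around a residual that returns
# sqrt(weight)*(calc-obs), so the sum of squares is a weighted chi**2. The
# stand-alone sketch below shows the same pattern with a straight line in place
# of G2lat.getPeakPos; the data values and parameter names are hypothetical.
def _demo_weighted_leastsq():
    xobs = np.array([1.,2.,3.,4.])
    yobs = np.array([2.1,3.9,6.2,7.8])
    wt = np.ones_like(yobs)
    def resid(vals):
        slope,intercept = vals
        return np.sqrt(wt)*(slope*xobs+intercept-yobs)
    result = so.leastsq(resid,[1.,0.],full_output=True)
    chisq = np.sum(result[2]['fvec']**2)     # same statistic DoCalibInst reports
    return result[0],chisq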
2156 | def getHeaderInfo(dataType): |
---|
2157 | '''Provide parameter name, label name and formatting information for the |
---|
2158 | contents of the Peak Table; also used in DoPeakFit
---|
2159 | ''' |
---|
2160 | names = ['pos','int'] |
---|
2161 | lnames = ['position','intensity'] |
---|
2162 | if 'LF' in dataType: |
---|
2163 | names = ['int','sig','gam','damp','asym','l','ttheta'] |
---|
2164 | lnames = ['intensity','sigma','gamma','satellite\ndamping', |
---|
2165 | 'satellite\nasym','00l', |
---|
2166 | #'2theta ' |
---|
2167 | '2\u03B8' |
---|
2168 | ] |
---|
2169 | fmt = ["%10.2f","%10.3f","%10.3f","%10.3f","%10.3f","%4.0f","%8.3f"] |
---|
2170 | elif 'C' in dataType: |
---|
2171 | names += ['sig','gam'] |
---|
2172 | lnames += ['sigma','gamma'] |
---|
2173 | fmt = ["%10.5f","%10.1f","%10.3f","%10.3f"] |
---|
2174 | elif 'T' in dataType: |
---|
2175 | names += ['alp','bet','sig','gam'] |
---|
2176 | lnames += ['alpha','beta','sigma','gamma'] |
---|
2177 | fmt = ["%10.2f","%10.4f","%8.3f","%8.5f","%10.3f","%10.3f"] |
---|
2178 | elif 'E' in dataType: |
---|
2179 | names += ['sig'] |
---|
2180 | lnames += ['sigma'] |
---|
2181 | fmt = ["%10.5f","%10.1f","%8.3f"] |
---|
2182 | else: # 'B' |
---|
2183 | names += ['alp','bet','sig','gam'] |
---|
2184 | lnames += ['alpha','beta','sigma','gamma'] |
---|
2185 | fmt = ["%10.5f","%10.1f","%8.2f","%8.4f","%10.3f","%10.3f"] |
---|
2186 | return names, fmt, lnames |
---|
2187 | |
---|
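# A usage sketch for getHeaderInfo: the histogram type string selects the column
# names/labels/formats for the Peak Table. 'PXC' (CW x-ray) is used here only as
# an illustration; any type containing 'C' (and not 'LF') takes the same branch.
def _demo_peak_table_columns():
    names,fmt,lnames = getHeaderInfo('PXC')
    # names  -> ['pos','int','sig','gam']
    # lnames -> ['position','intensity','sigma','gamma']
    return dict(zip(names,fmt))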
2188 | def DoPeakFit(FitPgm,Peaks,Background,Limits,Inst,Inst2,data,fixback=None,prevVaryList=[], |
---|
2189 | oneCycle=False,controls=None,wtFactor=1.0,dlg=None,noFit=False): |
---|
2190 | '''Called to perform a peak fit, refining the selected items in the peak |
---|
2191 | table as well as selected items in the background. |
---|
2192 | |
---|
2193 | :param str FitPgm: type of fit to perform. At present this is ignored. |
---|
2194 | :param list Peaks: a list of peaks. Each peak entry is a list with paired values: |
---|
2195 | The number of pairs depends on the data type (see :func:`getHeaderInfo`). |
---|
2196 | For CW data there are |
---|
2197 | four values each followed by a refine flag where the values are: position, intensity, |
---|
2198 | sigma (Gaussian width) and gamma (Lorentzian width). From the Histogram/"Peak List" |
---|
2199 | tree entry, dict item "peaks". For some types of fits, overall parameters are placed |
---|
2200 | in a dict entry. |
---|
2201 | :param list Background: describes the background. List with two items. |
---|
2202 | Item 0 specifies a background model and coefficients. Item 1 is a dict. |
---|
2203 | From the Histogram/Background tree entry. |
---|
2204 | :param list Limits: min and max x-value to use |
---|
2205 | :param dict Inst: Instrument parameters |
---|
2206 | :param dict Inst2: more Instrument parameters |
---|
2207 | :param numpy.array data: a 6xn array. data[0] is the x-values,
---|
2208 | data[1] is the y-values, data[2] are weight values, data[3], [4] and [5] are |
---|
2209 | calc, background and difference intensities, respectively. |
---|
2210 | :param array fixback: fixed background array; same size as data[0-5] |
---|
2211 | :param list prevVaryList: Used in sequential refinements to override the |
---|
2212 | variable list. Defaults as an empty list. |
---|
2213 | :param bool oneCycle: True if only one cycle of fitting should be performed |
---|
2214 | :param dict controls: a dict specifying two values, Ftol = controls['min dM/M'] |
---|
2215 | and derivType = controls['deriv type']. If None default values are used. |
---|
2216 | :param float wtFactor: weight multiplier; = 1.0 by default |
---|
2217 | :param wx.Dialog dlg: A dialog box that is updated with progress from the fit. |
---|
2218 | Defaults to None, which means no updates are done. |
---|
2219 | :param bool noFit: When noFit is True, a refinement is not performed. Default |
---|
2220 | is False. |
---|
2221 | |
---|
2222 | ''' |
---|
2223 | def GetBackgroundParms(parmList,Background): |
---|
2224 | iBak = 0 |
---|
2225 | while True: |
---|
2226 | try: |
---|
2227 | bakName = 'Back;'+str(iBak) |
---|
2228 | Background[0][iBak+3] = parmList[bakName] |
---|
2229 | iBak += 1 |
---|
2230 | except KeyError: |
---|
2231 | break |
---|
2232 | iDb = 0 |
---|
2233 | while True: |
---|
2234 | names = ['DebyeA;','DebyeR;','DebyeU;'] |
---|
2235 | try: |
---|
2236 | for i,name in enumerate(names): |
---|
2237 | val = parmList[name+str(iDb)] |
---|
2238 | Background[1]['debyeTerms'][iDb][2*i] = val |
---|
2239 | iDb += 1 |
---|
2240 | except KeyError: |
---|
2241 | break |
---|
2242 | iDb = 0 |
---|
2243 | while True: |
---|
2244 | names = ['BkPkpos;','BkPkint;','BkPksig;','BkPkgam;'] |
---|
2245 | try: |
---|
2246 | for i,name in enumerate(names): |
---|
2247 | val = parmList[name+str(iDb)] |
---|
2248 | Background[1]['peaksList'][iDb][2*i] = val |
---|
2249 | iDb += 1 |
---|
2250 | except KeyError: |
---|
2251 | break |
---|
2252 | if 'BF mult' in parmList: |
---|
2253 | Background[1]['background PWDR'][1] = parmList['BF mult'] |
---|
2254 | |
---|
2255 | def BackgroundPrint(Background,sigDict): |
---|
2256 | print ('Background coefficients for '+Background[0][0]+' function') |
---|
2257 | ptfmt = "%12.5f" |
---|
2258 | ptstr = 'value: ' |
---|
2259 | sigstr = 'esd : ' |
---|
2260 | for i,back in enumerate(Background[0][3:]): |
---|
2261 | ptstr += ptfmt % (back) |
---|
2262 | if Background[0][1]: |
---|
2263 | prm = 'Back;'+str(i) |
---|
2264 | if prm in sigDict: |
---|
2265 | sigstr += ptfmt % (sigDict[prm]) |
---|
2266 | else: |
---|
2267 | sigstr += " "*12 |
---|
2268 | if len(ptstr) > 75: |
---|
2269 | print (ptstr) |
---|
2270 | if Background[0][1]: print (sigstr) |
---|
2271 | ptstr = 'value: ' |
---|
2272 | sigstr = 'esd : ' |
---|
2273 | if len(ptstr) > 8: |
---|
2274 | print (ptstr) |
---|
2275 | if Background[0][1]: print (sigstr) |
---|
2276 | |
---|
2277 | if Background[1]['nDebye']: |
---|
2278 | parms = ['DebyeA;','DebyeR;','DebyeU;'] |
---|
2279 | print ('Debye diffuse scattering coefficients') |
---|
2280 | ptfmt = "%12.5f" |
---|
2281 | print (' term DebyeA esd DebyeR esd DebyeU esd') |
---|
2282 | for term in range(Background[1]['nDebye']): |
---|
2283 | line = ' term %d'%(term) |
---|
2284 | for ip,name in enumerate(parms): |
---|
2285 | line += ptfmt%(Background[1]['debyeTerms'][term][2*ip]) |
---|
2286 | if name+str(term) in sigDict: |
---|
2287 | line += ptfmt%(sigDict[name+str(term)]) |
---|
2288 | else: |
---|
2289 | line += " "*12 |
---|
2290 | print (line) |
---|
2291 | if Background[1]['nPeaks']: |
---|
2292 | print ('Coefficients for Background Peaks') |
---|
2293 | ptfmt = "%15.3f" |
---|
2294 | for j,pl in enumerate(Background[1]['peaksList']): |
---|
2295 | names = 'peak %3d:'%(j+1) |
---|
2296 | ptstr = 'values :' |
---|
2297 | sigstr = 'esds :' |
---|
2298 | for i,lbl in enumerate(['BkPkpos','BkPkint','BkPksig','BkPkgam']): |
---|
2299 | val = pl[2*i] |
---|
2300 | prm = lbl+";"+str(j) |
---|
2301 | names += '%15s'%(prm) |
---|
2302 | ptstr += ptfmt%(val) |
---|
2303 | if prm in sigDict: |
---|
2304 | sigstr += ptfmt%(sigDict[prm]) |
---|
2305 | else: |
---|
2306 | sigstr += " "*15 |
---|
2307 | print (names) |
---|
2308 | print (ptstr) |
---|
2309 | print (sigstr) |
---|
2310 | if 'BF mult' in sigDict: |
---|
2311 | print('Background file mult: %.3f(%d)'%(Background[1]['background PWDR'][1],int(1000*sigDict['BF mult']))) |
---|
2312 | |
---|
2313 | def SetInstParms(Inst): |
---|
2314 | dataType = Inst['Type'][0] |
---|
2315 | insVary = [] |
---|
2316 | insNames = [] |
---|
2317 | insVals = [] |
---|
2318 | for parm in Inst: |
---|
2319 | insNames.append(parm) |
---|
2320 | insVals.append(Inst[parm][1]) |
---|
2321 | if parm in ['U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha','A','B','C', |
---|
2322 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','alpha-0','alpha-1'] and Inst[parm][2]: |
---|
2323 | insVary.append(parm) |
---|
2324 | instDict = dict(zip(insNames,insVals)) |
---|
2325 | if 'SH/L' in instDict: |
---|
2326 | instDict['SH/L'] = max(instDict['SH/L'],0.002) |
---|
2327 | return dataType,instDict,insVary |
---|
2328 | |
---|
2329 | def GetPkInstParms(parmDict,Inst,varyList): |
---|
2330 | for name in Inst: |
---|
2331 | Inst[name][1] = parmDict[name] |
---|
2332 | iPeak = 0 |
---|
2333 | while True: |
---|
2334 | try: |
---|
2335 | sigName = 'sig'+str(iPeak) |
---|
2336 | pos = parmDict['pos'+str(iPeak)] |
---|
2337 | if sigName not in varyList and peakInstPrmMode: |
---|
2338 | if 'T' in Inst['Type'][0]: |
---|
2339 | dsp = G2lat.Pos2dsp(Inst,pos) |
---|
2340 | parmDict[sigName] = G2mth.getTOFsig(parmDict,dsp) |
---|
2341 | if 'E' in Inst['Type'][0]: |
---|
2342 | parmDict[sigName] = G2mth.getEDsig(parmDict,pos) |
---|
2343 | else: |
---|
2344 | parmDict[sigName] = G2mth.getCWsig(parmDict,pos) |
---|
2345 | gamName = 'gam'+str(iPeak) |
---|
2346 | if gamName not in varyList and peakInstPrmMode: |
---|
2347 | if 'T' in Inst['Type'][0]: |
---|
2348 | dsp = G2lat.Pos2dsp(Inst,pos) |
---|
2349 | parmDict[gamName] = G2mth.getTOFgamma(parmDict,dsp) |
---|
2350 | else: |
---|
2351 | parmDict[gamName] = G2mth.getCWgam(parmDict,pos) |
---|
2352 | iPeak += 1 |
---|
2353 | except KeyError: |
---|
2354 | break |
---|
2355 | |
---|
2356 | def InstPrint(Inst,sigDict): |
---|
2357 | print ('Instrument Parameters:') |
---|
2358 | ptfmt = "%12.6f" |
---|
2359 | ptlbls = 'names :' |
---|
2360 | ptstr = 'values:' |
---|
2361 | sigstr = 'esds :' |
---|
2362 | for parm in Inst: |
---|
2363 | if parm in ['U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha','A','B','C', |
---|
2364 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','alpha-0','alpha-1']: |
---|
2365 | ptlbls += "%s" % (parm.center(12)) |
---|
2366 | ptstr += ptfmt % (Inst[parm][1]) |
---|
2367 | if parm in sigDict: |
---|
2368 | sigstr += ptfmt % (sigDict[parm]) |
---|
2369 | else: |
---|
2370 | sigstr += 12*' ' |
---|
2371 | print (ptlbls) |
---|
2372 | print (ptstr) |
---|
2373 | print (sigstr) |
---|
2374 | |
---|
2375 | def SetPeaksParms(dataType,Peaks): |
---|
2376 | '''Set the contents of peakDict from list Peaks |
---|
2377 | ''' |
---|
2378 | peakDict = {} |
---|
2379 | peakVary = [] |
---|
2380 | names,_,_ = getHeaderInfo(dataType) |
---|
2381 | if 'LF' in dataType: |
---|
2382 | off = 2 |
---|
2383 | names = names[:-2] # drop 00l & 2theta from header |
---|
2384 | else: |
---|
2385 | off = 0 |
---|
2386 | for i,peak in enumerate(Peaks): |
---|
2387 | if type(peak) is dict: |
---|
2388 | peakDict.update(peak) |
---|
2389 | continue |
---|
2390 | if 'LF' in dataType: peakDict['l'+str(i)] = peak[12] |
---|
2391 | for j,name in enumerate(names): |
---|
2392 | parName = name+str(i) |
---|
2393 | peakDict[parName] = peak[off+2*j] |
---|
2394 | if peak[off+2*j+1]: |
---|
2395 | peakVary.append(parName) |
---|
2396 | return peakDict,peakVary |
---|
2397 | |
---|
2398 | def GetPeaksParms(Inst,parmDict,Peaks,varyList): |
---|
2399 | '''Put values into the Peaks list from the refinement results from inside |
---|
2400 | the parmDict array |
---|
2401 | ''' |
---|
2402 | names,_,_ = getHeaderInfo(Inst['Type'][0]) |
---|
2403 | off = 0 |
---|
2404 | if 'LF' in Inst['Type'][0]: |
---|
2405 | off = 2 |
---|
2406 | if 'clat' in varyList: |
---|
2407 | Peaks[-1]['clat'] = parmDict['clat'] |
---|
2408 | names = names[:-1] # drop 2nd 2theta value |
---|
2409 | for i,peak in enumerate(Peaks): |
---|
2410 | if type(peak) is dict: continue |
---|
2411 | parmDict['ttheta'+str(i)] = peak[-1] |
---|
2412 | for i,peak in enumerate(Peaks): |
---|
2413 | if type(peak) is dict: |
---|
2414 | continue |
---|
2415 | for j in range(len(names)): |
---|
2416 | parName = names[j]+str(i) |
---|
2417 | if parName in varyList or not peakInstPrmMode: |
---|
2418 | peak[2*j+off] = parmDict[parName] |
---|
2419 | if 'pos'+str(i) not in parmDict: continue |
---|
2420 | pos = parmDict['pos'+str(i)] |
---|
2421 | if 'LF' in Inst['Type'][0]: |
---|
2422 | peak[0] = pos |
---|
2423 | peak[-1] = pos |
---|
2424 | if 'difC' in Inst: |
---|
2425 | dsp = pos/Inst['difC'][1] |
---|
2426 | for j in range(len(names)): |
---|
2427 | parName = names[j]+str(i) |
---|
2428 | if peak[2*j+off + 1] or not peakInstPrmMode: continue |
---|
2429 | if 'alp' in parName: |
---|
2430 | if 'T' in Inst['Type'][0]: |
---|
2431 | peak[2*j+off] = G2mth.getTOFalpha(parmDict,dsp) |
---|
2432 | else: #'B' |
---|
2433 | peak[2*j+off] = G2mth.getPinkalpha(parmDict,pos) |
---|
2434 | elif 'bet' in parName: |
---|
2435 | if 'T' in Inst['Type'][0]: |
---|
2436 | peak[2*j+off] = G2mth.getTOFbeta(parmDict,dsp) |
---|
2437 | else: #'B' |
---|
2438 | peak[2*j+off] = G2mth.getPinkbeta(parmDict,pos) |
---|
2439 | elif 'sig' in parName: |
---|
2440 | if 'T' in Inst['Type'][0]: |
---|
2441 | peak[2*j+off] = G2mth.getTOFsig(parmDict,dsp) |
---|
2442 | elif 'E' in Inst['Type'][0]: |
---|
2443 | peak[2*j+off] = G2mth.getEDsig(parmDict,pos) |
---|
2444 | else: #'C' & 'B' |
---|
2445 | peak[2*j+off] = G2mth.getCWsig(parmDict,pos) |
---|
2446 | elif 'gam' in parName: |
---|
2447 | if 'T' in Inst['Type'][0]: |
---|
2448 | peak[2*j+off] = G2mth.getTOFgamma(parmDict,dsp) |
---|
2449 | else: #'C' & 'B' |
---|
2450 | peak[2*j+off] = G2mth.getCWgam(parmDict,pos) |
---|
2451 | |
---|
2452 | def PeaksPrint(dataType,parmDict,sigDict,varyList,ptsperFW): |
---|
2453 | if 'clat' in varyList: |
---|
2454 | print('c = {:.6f} esd {:.6f}'.format(parmDict['clat'],sigDict['clat'])) |
---|
2455 | print ('Peak coefficients:') |
---|
2456 | names,fmt,_ = getHeaderInfo(dataType) |
---|
2457 | head = 13*' ' |
---|
2458 | for name in names: |
---|
2459 | if name == 'l': |
---|
2460 | head += name |
---|
2461 | elif name == 'ttheta': |
---|
2462 | head += name.center(8) |
---|
2463 | elif name in ['alp','bet']: |
---|
2464 | head += name.center(8)+'esd'.center(8) |
---|
2465 | else: |
---|
2466 | head += name.center(10)+'esd'.center(10) |
---|
2467 | head += 'bins'.center(12) |
---|
2468 | print (head) |
---|
2469 | ptfmt = dict(zip(names,fmt)) |
---|
2470 | for i,peak in enumerate(Peaks): |
---|
2471 | if type(peak) is dict: |
---|
2472 | continue |
---|
2473 | ptstr = ':' |
---|
2474 | for j in range(len(names)): |
---|
2475 | name = names[j] |
---|
2476 | parName = name+str(i) |
---|
2477 | if parName not in parmDict: continue |
---|
2478 | ptstr += ptfmt[name] % (parmDict[parName]) |
---|
2479 | if name == 'l' or name == 'ttheta': |
---|
2480 | continue |
---|
2481 | if parName in varyList: |
---|
2482 | ptstr += ptfmt[name] % (sigDict[parName]) |
---|
2483 | else: |
---|
2484 | if name in ['alp','bet']: |
---|
2485 | ptstr += 8*' ' |
---|
2486 | else: |
---|
2487 | ptstr += 10*' ' |
---|
2488 | ptstr += '%8.1f'%(ptsperFW[i]) |
---|
2489 | print ('%s'%(('Peak'+str(i+1)).center(8)),ptstr) |
---|
2490 | |
---|
2491 | def devPeakProfile(values,xdata,ydata,fixback, weights,dataType,parmdict,varylist,bakType,dlg): |
---|
2492 | '''Computes a matrix where each row is the derivative of the calc-obs |
---|
2493 | values (see :func:`errPeakProfile`) with respect to each parameter |
---|
2494 | in backVary,insVary,peakVary. Used for peak fitting. |
---|
2495 | ''' |
---|
2496 | parmdict.update(zip(varylist,values)) |
---|
2497 | return np.sqrt(weights)*getPeakProfileDerv(dataType,parmdict,xdata,fixback,varylist,bakType) |
---|
2498 | |
---|
2499 | def errPeakProfile(values,xdata,ydata,fixback,weights,dataType,parmdict,varylist,bakType,dlg): |
---|
2500 | '''Computes a vector of the weighted calc-obs differences, |
---|
2501 | used for peak fitting |
---|
2502 | ''' |
---|
2503 | parmdict.update(zip(varylist,values)) |
---|
2504 | M = np.sqrt(weights)*(getPeakProfile(dataType,parmdict,xdata,fixback,varylist,bakType)-ydata) |
---|
2505 | Rwp = min(100.,np.sqrt(np.sum(M**2)/np.sum(weights*ydata**2))*100.) |
---|
2506 | if dlg: |
---|
2507 | dlg.Raise() |
---|
2508 | GoOn = dlg.Update(int(Rwp),newmsg='%s%8.3f%s'%('Peak fit Rwp =',Rwp,'%'))[0] |
---|
2509 | if not GoOn: |
---|
2510 | return -M #abort!! |
---|
2511 | return M |
---|
2512 | |
---|
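# Usage sketch: errPeakProfile and devPeakProfile are the residual and
# column-derivative callbacks handed to scipy.optimize.leastsq inside DoPeakFit
# below; this mirrors the actual call further down (names refer to the local
# variables there, not to an independent API):
#
#   result = so.leastsq(errPeakProfile, values, Dfun=devPeakProfile,
#       full_output=True, ftol=Ftol, col_deriv=True,
#       args=(x[xBeg:xFin], y[xBeg:xFin], fixback[xBeg:xFin], wtFactor*w[xBeg:xFin],
#             dataType, parmDict, varyList, bakType, dlg))
#   Values2Dict(parmDict, varyList, result[0])   # refined values back into parmDict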
2513 | #---- beginning of DoPeakFit --------------------------------------------- |
---|
2514 | if controls: |
---|
2515 | Ftol = controls['min dM/M'] |
---|
2516 | else: |
---|
2517 | Ftol = 0.0001 |
---|
2518 | if oneCycle: |
---|
2519 | Ftol = 1.0 |
---|
2520 | x,y,w,yc,yb,yd = data #these are numpy arrays - remove masks! |
---|
2521 | if fixback is None: |
---|
2522 | fixback = np.zeros_like(y) |
---|
2523 | yc.fill(0.) #set calcd ones to zero |
---|
2524 | yb.fill(0.) |
---|
2525 | yd.fill(0.) |
---|
2526 | xBeg = np.searchsorted(x,Limits[0]) |
---|
2527 | xFin = np.searchsorted(x,Limits[1])+1 |
---|
2528 | # find out what is varied |
---|
2529 | bakType,bakDict,bakVary = SetBackgroundParms(Background) |
---|
2530 | dataType,insDict,insVary = SetInstParms(Inst) |
---|
2531 | peakDict,peakVary = SetPeaksParms(Inst['Type'][0],Peaks) |
---|
2532 | parmDict = {} |
---|
2533 | parmDict.update(bakDict) |
---|
2534 | parmDict.update(insDict) |
---|
2535 | parmDict.update(peakDict) |
---|
2536 | parmDict['Pdabc'] = [] #dummy Pdabc |
---|
2537 | parmDict.update(Inst2) #put in real one if there |
---|
2538 | if prevVaryList: |
---|
2539 | varyList = prevVaryList[:] |
---|
2540 | else: |
---|
2541 | varyList = bakVary+insVary+peakVary |
---|
2542 | if 'LF' in Inst['Type'][0] and Peaks: |
---|
2543 | if Peaks[-1].get('clat-ref'): varyList += ['clat'] |
---|
2544 | fullvaryList = varyList[:] |
---|
2545 | if not peakInstPrmMode: |
---|
2546 | for v in ('U','V','W','X','Y','Z','alpha','alpha-0','alpha-1','A','B','C', |
---|
2547 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',): |
---|
2548 | if v in varyList: |
---|
2549 | raise Exception('Instrumental profile terms cannot be varied '+ |
---|
2550 | 'after setPeakInstPrmMode(False) is used') |
---|
2551 | if 'LF' in Inst['Type'][0]: |
---|
2552 | warn = [] |
---|
2553 | for v in ('U','V','W','X','Y','Z','alpha','alpha-0','alpha-1', |
---|
2554 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',): |
---|
2555 | if v in varyList: |
---|
2556 | warn.append(v) |
---|
2557 | del varyList[varyList.index(v)] |
---|
2558 | if warn: |
---|
2559 | print('Instrumental profile terms cannot be varied '+ |
---|
2560 | 'in Laue Fringe fits:',warn) |
---|
2561 | |
---|
2562 | while not noFit: |
---|
2563 | begin = time.time() |
---|
2564 | values = np.array(Dict2Values(parmDict, varyList)) |
---|
2565 | Rvals = {} |
---|
2566 | badVary = [] |
---|
2567 | try: |
---|
2568 | result = so.leastsq(errPeakProfile,values,Dfun=devPeakProfile,full_output=True,ftol=Ftol,col_deriv=True, |
---|
2569 | args=(x[xBeg:xFin],y[xBeg:xFin],fixback[xBeg:xFin],wtFactor*w[xBeg:xFin],dataType,parmDict,varyList,bakType,dlg)) |
---|
2570 | except Exception as msg: |
---|
2571 | if GSASIIpath.GetConfigValue('debug'): |
---|
2572 | print('peak fit failure\n',msg) |
---|
2573 | import traceback |
---|
2574 | print (traceback.format_exc()) |
---|
2575 | else: |
---|
2576 | print('peak fit failure') |
---|
2577 | return |
---|
2578 | ncyc = int(result[2]['nfev']/2) |
---|
2579 | runtime = time.time()-begin |
---|
2580 | chisq = np.sum(result[2]['fvec']**2) |
---|
2581 | Values2Dict(parmDict, varyList, result[0]) |
---|
2582 | Rvals['Rwp'] = np.sqrt(chisq/np.sum(wtFactor*w[xBeg:xFin]*y[xBeg:xFin]**2))*100. #to % |
---|
2583 | Rvals['GOF'] = chisq/(xFin-xBeg-len(varyList)) #reduced chi^2 |
---|
2584 | G2fil.G2Print ('Number of function calls: %d Number of observations: %d Number of parameters: %d'%(result[2]['nfev'],xFin-xBeg,len(varyList))) |
---|
2585 | if ncyc: |
---|
2586 | G2fil.G2Print ('fitpeak time = %8.3fs, %8.3fs/cycle'%(runtime,runtime/ncyc)) |
---|
2587 | G2fil.G2Print ('Rwp = %7.2f%%, chi**2 = %12.6g, reduced chi**2 = %6.2f'%(Rvals['Rwp'],chisq,Rvals['GOF'])) |
---|
2588 | sig = [0]*len(varyList) |
---|
2589 | if len(varyList) == 0: break # if nothing was refined |
---|
2590 | try: |
---|
2591 | sig = np.sqrt(np.diag(result[1])*Rvals['GOF']) |
---|
2592 | if np.any(np.isnan(sig)): |
---|
2593 | G2fil.G2Print ('*** Least squares aborted - some invalid esds possible ***') |
---|
2594 | break #refinement succeeded - finish up! |
---|
2595 | except ValueError: #result[1] is None on singular matrix |
---|
2596 | G2fil.G2Print ('**** Refinement failed - singular matrix ****') |
---|
2597 | Ipvt = result[2]['ipvt'] |
---|
2598 | for i,ipvt in enumerate(Ipvt): |
---|
2599 | if not np.sum(result[2]['fjac'],axis=1)[i]: |
---|
2600 | G2fil.G2Print ('Removing parameter: '+varyList[ipvt-1]) |
---|
2601 | badVary.append(varyList[ipvt-1]) |
---|
2602 | del(varyList[ipvt-1]) |
---|
2603 | break |
---|
2604 | else: # nothing removed |
---|
2605 | break |
---|
2606 | if dlg: dlg.Destroy() |
---|
2607 | yb[xBeg:xFin] = getBackground('',parmDict,bakType,dataType,x[xBeg:xFin],fixback[xBeg:xFin])[0] |
---|
2608 | yc[xBeg:xFin] = getPeakProfile(dataType,parmDict,x[xBeg:xFin],fixback[xBeg:xFin],varyList,bakType) |
---|
2609 | yd[xBeg:xFin] = y[xBeg:xFin]-yc[xBeg:xFin] |
---|
2610 | if noFit: |
---|
2611 | GetPeaksParms(Inst,parmDict,Peaks,varyList) |
---|
2612 | return |
---|
2613 | sigDict = dict(zip(varyList,sig)) |
---|
2614 | GetBackgroundParms(parmDict,Background) |
---|
2615 | if bakVary: BackgroundPrint(Background,sigDict) |
---|
2616 | GetPkInstParms(parmDict,Inst,varyList) |
---|
2617 | if insVary: InstPrint(Inst,sigDict) |
---|
2618 | GetPeaksParms(Inst,parmDict,Peaks,varyList) |
---|
2619 | binsperFWHM = [] |
---|
2620 | for peak in Peaks: |
---|
2621 | if type(peak) is dict: |
---|
2622 | continue |
---|
2623 | FWHM = getFWHM(peak[0],Inst) |
---|
2624 | try: |
---|
2625 | xpk = x.searchsorted(peak[0]) |
---|
2626 | cw = x[xpk]-x[xpk-1] |
---|
2627 | binsperFWHM.append(FWHM/cw) |
---|
2628 | except IndexError: |
---|
2629 | binsperFWHM.append(0.) |
---|
2630 | if peakVary: PeaksPrint(dataType,parmDict,sigDict,varyList,binsperFWHM) |
---|
2631 | if len(binsperFWHM): |
---|
2632 | if min(binsperFWHM) < 1.: |
---|
2633 | G2fil.G2Print ('*** Warning: calculated peak widths are too narrow to refine profile coefficients ***') |
---|
2634 | if 'T' in Inst['Type'][0]: |
---|
2635 | G2fil.G2Print (' Manually increase sig-0, 1, or 2 in Instrument Parameters') |
---|
2636 | else: |
---|
2637 | G2fil.G2Print (' Manually increase W in Instrument Parameters') |
---|
2638 | elif min(binsperFWHM) < 4.: |
---|
2639 | G2fil.G2Print ('*** Warning: data binning yields too few data points across peak FWHM for reliable Rietveld refinement ***') |
---|
2640 | G2fil.G2Print ('*** recommended is 6-10; you have %.2f ***'%(min(binsperFWHM))) |
---|
2641 | return sigDict,result,sig,Rvals,varyList,parmDict,fullvaryList,badVary |
---|
2642 | |
---|
2643 | def calcIncident(Iparm,xdata): |
---|
2644 | '''Compute the incident intensity spectrum YI and its variance WYI at the points xdata (TOF) from the incident spectrum coefficients in Iparm (Exponential, Maxwell or Chebyschev forms)''' |
---|
2645 | |
---|
2646 | def IfunAdv(Iparm,xdata): |
---|
2647 | Itype = Iparm['Itype'] |
---|
2648 | Icoef = Iparm['Icoeff'] |
---|
2649 | DYI = np.ones((12,xdata.shape[0])) |
---|
2650 | YI = np.ones_like(xdata)*Icoef[0] |
---|
2651 | |
---|
2652 | x = xdata/1000. #expressions are in ms |
---|
2653 | if Itype == 'Exponential': |
---|
2654 | for i in [1,3,5,7,9]: |
---|
2655 | Eterm = np.exp(-Icoef[i+1]*x**((i+1)/2)) |
---|
2656 | YI += Icoef[i]*Eterm |
---|
2657 | DYI[i] *= Eterm |
---|
2658 | DYI[i+1] *= -Icoef[i]*Eterm*x**((i+1)/2) |
---|
2659 | elif 'Maxwell' in Itype: |
---|
2660 | Eterm = np.exp(-Icoef[2]/x**2) |
---|
2661 | DYI[1] = Eterm/x**5 |
---|
2662 | DYI[2] = -Icoef[1]*DYI[1]/x**2 |
---|
2663 | YI += (Icoef[1]*Eterm/x**5) |
---|
2664 | if 'Exponential' in Itype: |
---|
2665 | for i in range(3,11,2): |
---|
2666 | Eterm = np.exp(-Icoef[i+1]*x**((i+1)/2)) |
---|
2667 | YI += Icoef[i]*Eterm |
---|
2668 | DYI[i] *= Eterm |
---|
2669 | DYI[i+1] *= -Icoef[i]*Eterm*x**((i+1)/2) |
---|
2670 | else: #Chebyschev |
---|
2671 | T = (2./x)-1. |
---|
2672 | Ccof = np.ones((12,xdata.shape[0])) |
---|
2673 | Ccof[1] = T |
---|
2674 | for i in range(2,12): |
---|
2675 | Ccof[i] = 2*T*Ccof[i-1]-Ccof[i-2] |
---|
2676 | for i in range(1,10): |
---|
2677 | YI += Ccof[i]*Icoef[i+2] |
---|
2678 | DYI[i+2] =Ccof[i] |
---|
2679 | return YI,DYI |
---|
2680 | |
---|
2681 | Iesd = np.array(Iparm['Iesd']) |
---|
2682 | Icovar = Iparm['Icovar'] |
---|
2683 | YI,DYI = IfunAdv(Iparm,xdata) |
---|
2684 | YI = np.where(YI>0,YI,1.) |
---|
2685 | WYI = np.zeros_like(xdata) |
---|
2686 | vcov = np.zeros((12,12)) |
---|
2687 | k = 0 |
---|
2688 | for i in range(12): |
---|
2689 | for j in range(i,12): |
---|
2690 | vcov[i][j] = Icovar[k]*Iesd[i]*Iesd[j] |
---|
2691 | vcov[j][i] = Icovar[k]*Iesd[i]*Iesd[j] |
---|
2692 | k += 1 |
---|
2693 | M = np.inner(vcov,DYI.T) |
---|
2694 | WYI = np.sum(M*DYI,axis=0) |
---|
2695 | WYI = np.where(WYI>0.,WYI,0.) |
---|
2696 | return YI,WYI |
---|
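# Usage sketch for calcIncident (illustrative names; Iparm must supply 'Itype',
# 'Icoeff', 'Iesd' and 'Icovar' as read from an instrument parameter file):
#
#   YI, WYI = calcIncident(Iparm, tofPoints)   # incident intensity and its variance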
2697 | |
---|
2698 | #### RMCutilities ################################################################################ |
---|
2699 | def MakeInst(PWDdata,Name,Size,Mustrain,useSamBrd): |
---|
2700 | inst = PWDdata['Instrument Parameters'][0] |
---|
2701 | sample = PWDdata['Sample Parameters'] |
---|
2702 | Xsb = 0. |
---|
2703 | Ysb = 0. |
---|
2704 | if 'T' in inst['Type'][1]: |
---|
2705 | difC = inst['difC'][1] |
---|
2706 | if useSamBrd[0]: |
---|
2707 | if 'ellipsoidal' not in Size[0]: #take the isotropic term only |
---|
2708 | Xsb = 1.e-4*difC/Size[1][0] |
---|
2709 | if useSamBrd[1]: |
---|
2710 | if 'generalized' not in Mustrain[0]: #take the isotropic term only |
---|
2711 | Ysb = 1.e-6*difC*Mustrain[1][0] |
---|
2712 | prms = ['Bank', |
---|
2713 | 'difC','difA','Zero','2-theta','difB', |
---|
2714 | 'alpha','beta-0','beta-1','beta-q', |
---|
2715 | 'sig-0','sig-1','sig-2','sig-q', |
---|
2716 | 'Z','X','Y'] |
---|
2717 | fname = Name+'.inst' |
---|
2718 | fl = open(fname,'w') |
---|
2719 | fl.write('1\n') |
---|
2720 | fl.write('%d\n'%int(inst[prms[0]][1])) |
---|
2721 | fl.write('%19.11f%19.11f%19.11f%19.11f%19.11f\n'%(inst[prms[1]][1],inst[prms[2]][1],inst[prms[3]][1],inst[prms[4]][1],inst[prms[5]][1],)) |
---|
2722 | fl.write('%12.6e%14.6e%14.6e%14.6e\n'%(inst[prms[6]][1],inst[prms[7]][1],inst[prms[8]][1],inst[prms[9]][1])) |
---|
2723 | fl.write('%12.6e%14.6e%14.6e%14.6e\n'%(inst[prms[10]][1],inst[prms[11]][1],inst[prms[12]][1],inst[prms[13]][1])) |
---|
2724 | fl.write('%12.6e%14.6e%14.6e%14.6e%14.6e\n'%(inst[prms[14]][1],inst[prms[15]][1]+Ysb,inst[prms[16]][1]+Xsb,0.0,0.0)) |
---|
2725 | fl.write('%12.6e\n\n\n'%(sample['Absorption'][0])) |
---|
2726 | fl.close() |
---|
2727 | else: |
---|
2728 | if useSamBrd[0]: |
---|
2729 | wave = G2mth.getWave(inst) |
---|
2730 | if 'ellipsoidal' not in Size[0]: #take the isotropic term only |
---|
2731 | Xsb = 1.8*wave/(np.pi*Size[1][0]) |
---|
2732 | if useSamBrd[1]: |
---|
2733 | if 'generalized' not in Mustrain[0]: #take the isotropic term only |
---|
2734 | Ysb = 0.0180*Mustrain[1][0]/np.pi |
---|
2735 | prms = ['Bank', |
---|
2736 | 'Lam','Zero','Polariz.', |
---|
2737 | 'U','V','W', |
---|
2738 | 'X','Y'] |
---|
2739 | fname = Name+'.inst' |
---|
2740 | fl = open(fname,'w') |
---|
2741 | fl.write('1\n') |
---|
2742 | fl.write('%d\n'%int(inst[prms[0]][1])) |
---|
2743 | fl.write('%10.5f%10.5f%10.4f%10d\n'%(inst[prms[1]][1],100.*inst[prms[2]][1],inst[prms[3]][1],0)) |
---|
2744 | fl.write('%10.3f%10.3f%10.3f\n'%(inst[prms[4]][1],inst[prms[5]][1],inst[prms[6]][1])) |
---|
2745 | fl.write('%10.3f%10.3f%10.3f\n'%(inst[prms[7]][1]+Xsb,inst[prms[8]][1]+Ysb,0.0)) |
---|
2746 | fl.write('%10.3f%10.3f%10.3f\n'%(0.0,0.0,0.0)) |
---|
2747 | fl.write('%12.6e\n\n\n'%(sample['Absorption'][0])) |
---|
2748 | fl.close() |
---|
2749 | return fname |
---|
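# Illustrative call for MakeInst; Size and Mustrain are the phase size/mustrain
# entries and useSamBrd selects whether isotropic sample size/strain broadening is
# folded into the profile terms written to the RMCProfile file <Name>.inst:
#
#   instFile = MakeInst(PWDdata, 'mysample', Size, Mustrain, [True, True])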
2750 | |
---|
2751 | def MakeBack(PWDdata,Name): |
---|
2752 | Back = PWDdata['Background'][0] |
---|
2753 | inst = PWDdata['Instrument Parameters'][0] |
---|
2754 | if 'chebyschev-1' != Back[0]: |
---|
2755 | return None |
---|
2756 | Nback = Back[2] |
---|
2757 | BackVals = Back[3:] |
---|
2758 | fname = Name+'.back' |
---|
2759 | fl = open(fname,'w') |
---|
2760 | fl.write('%10d\n'%Nback) |
---|
2761 | for val in BackVals: |
---|
2762 | if 'T' in inst['Type'][1]: |
---|
2763 | fl.write('%12.6g\n'%(float(val))) |
---|
2764 | else: |
---|
2765 | fl.write('%12.6g\n'%val) |
---|
2766 | fl.close() |
---|
2767 | return fname |
---|
2768 | |
---|
2769 | def findDup(Atoms): |
---|
2770 | Dup = [] |
---|
2771 | Fracs = [] |
---|
2772 | for iat1,at1 in enumerate(Atoms): |
---|
2773 | if any([at1[0] in dup for dup in Dup]): |
---|
2774 | continue |
---|
2775 | else: |
---|
2776 | Dup.append([at1[0],]) |
---|
2777 | Fracs.append([at1[6],]) |
---|
2778 | for iat2,at2 in enumerate(Atoms[(iat1+1):]): |
---|
2779 | if np.sum((np.array(at1[3:6])-np.array(at2[3:6]))**2) < 0.00001: |
---|
2780 | Dup[-1] += [at2[0],] |
---|
2781 | Fracs[-1]+= [at2[6],] |
---|
2782 | return Dup,Fracs |
---|
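# findDup groups atoms with essentially identical coordinates (summed squared
# difference below 1e-5) and collects their occupancies; a sketch of the output,
# assuming 'Fe1' and 'Ni1' are mixed on one site and 'O1' occupies its own site:
#
#   Dup   -> [['Fe1', 'Ni1'], ['O1']]
#   Fracs -> [[0.5, 0.5], [1.0]]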
2783 | |
---|
2784 | def MakeRMC6f(PWDdata,Name,Phase,RMCPdict): |
---|
2785 | |
---|
2786 | Meta = RMCPdict['metadata'] |
---|
2787 | Atseq = RMCPdict['atSeq'] |
---|
2788 | Supercell = RMCPdict['SuperCell'] |
---|
2789 | generalData = Phase['General'] |
---|
2790 | Dups,Fracs = findDup(Phase['Atoms']) |
---|
2791 | Sfracs = [np.cumsum(fracs) for fracs in Fracs] |
---|
2792 | ifSfracs = any([np.any(sfracs-1.) for sfracs in Sfracs]) |
---|
2793 | Sample = PWDdata['Sample Parameters'] |
---|
2794 | Meta['temperature'] = Sample['Temperature'] |
---|
2795 | Meta['pressure'] = Sample['Pressure'] |
---|
2796 | Cell = generalData['Cell'][1:7] |
---|
2797 | Trans = np.eye(3)*np.array(Supercell) |
---|
2798 | newPhase = copy.deepcopy(Phase) |
---|
2799 | newPhase['General']['SGData'] = G2spc.SpcGroup('P 1')[1] |
---|
2800 | newPhase['General']['Cell'][1:] = G2lat.TransformCell(Cell,Trans) |
---|
2801 | GB = G2lat.cell2Gmat( newPhase['General']['Cell'][1:7])[0] |
---|
2802 | RMCPdict['Rmax'] = np.min(np.sqrt(np.array([1./G2lat.calc_rDsq2(H,GB) for H in [[1,0,0],[0,1,0],[0,0,1]]])))/2. |
---|
2803 | newPhase,Atcodes = G2lat.TransformPhase(Phase,newPhase,Trans,np.zeros(3),np.zeros(3),ifMag=False,Force=True) |
---|
2804 | Natm = np.core.defchararray.count(np.array(Atcodes),'+') #no. atoms in original unit cell |
---|
2805 | Natm = np.count_nonzero(Natm-1) |
---|
2806 | Atoms = newPhase['Atoms'] |
---|
2807 | reset = False |
---|
2808 | |
---|
2809 | if ifSfracs: |
---|
2810 | Natm = np.core.defchararray.count(np.array(Atcodes),'+') #no. atoms in original unit cell |
---|
2811 | Natm = np.count_nonzero(Natm-1) |
---|
2812 | Satoms = [] |
---|
2813 | for i in range(len(Atoms)//Natm): |
---|
2814 | ind = i*Natm |
---|
2815 | Satoms.append(G2mth.sortArray(G2mth.sortArray(G2mth.sortArray(Atoms[ind:ind+Natm],5),4),3)) |
---|
2816 | Natoms = [] |
---|
2817 | for satoms in Satoms: |
---|
2818 | for idup,dup in enumerate(Dups): |
---|
2819 | ldup = len(dup) |
---|
2820 | natm = len(satoms) |
---|
2821 | i = 0 |
---|
2822 | while i < natm: |
---|
2823 | if satoms[i][0] in dup: |
---|
2824 | atoms = satoms[i:i+ldup] |
---|
2825 | try: |
---|
2826 | atom = atoms[np.searchsorted(Sfracs[idup],rand.random())] |
---|
2827 | Natoms.append(atom) |
---|
2828 | except IndexError: #what about vacancies? |
---|
2829 | if 'Va' not in Atseq: |
---|
2830 | reset = True |
---|
2831 | Atseq.append('Va') |
---|
2832 | RMCPdict['aTypes']['Va'] = 0.0 |
---|
2833 | atom = atoms[0] |
---|
2834 | atom[1] = 'Va' |
---|
2835 | Natoms.append(atom) |
---|
2836 | i += ldup |
---|
2837 | else: |
---|
2838 | i += 1 |
---|
2839 | else: |
---|
2840 | Natoms = Atoms |
---|
2841 | |
---|
2842 | NAtype = np.zeros(len(Atseq)) |
---|
2843 | for atom in Natoms: |
---|
2844 | NAtype[Atseq.index(atom[1])] += 1 |
---|
2845 | NAstr = ['%6d'%i for i in NAtype] |
---|
2846 | Cell = newPhase['General']['Cell'][1:7] |
---|
2847 | if os.path.exists(Name+'.his6f'): |
---|
2848 | os.remove(Name+'.his6f') |
---|
2849 | if os.path.exists(Name+'.neigh'): |
---|
2850 | os.remove(Name+'.neigh') |
---|
2851 | fname = Name+'.rmc6f' |
---|
2852 | fl = open(fname,'w') |
---|
2853 | fl.write('(Version 6f format configuration file)\n') |
---|
2854 | for item in Meta: |
---|
2855 | fl.write('%-20s%s\n'%('Metadata '+item+':',Meta[item])) |
---|
2856 | fl.write('Atom types present: %s\n'%' '.join(Atseq)) |
---|
2857 | fl.write('Number of each atom type: %s\n'%''.join(NAstr)) |
---|
2858 | fl.write('Number of atoms: %d\n'%len(Natoms)) |
---|
2859 | fl.write('%-35s%4d%4d%4d\n'%('Supercell dimensions:',Supercell[0],Supercell[1],Supercell[2])) |
---|
2860 | fl.write('Cell (Ang/deg): %12.6f%12.6f%12.6f%12.6f%12.6f%12.6f\n'%( |
---|
2861 | Cell[0],Cell[1],Cell[2],Cell[3],Cell[4],Cell[5])) |
---|
2862 | A,B = G2lat.cell2AB(Cell,True) |
---|
2863 | fl.write('Lattice vectors (Ang):\n') |
---|
2864 | for i in [0,1,2]: |
---|
2865 | fl.write('%12.6f%12.6f%12.6f\n'%(A[i,0],A[i,1],A[i,2])) |
---|
2866 | fl.write('Atoms (fractional coordinates):\n') |
---|
2867 | nat = 0 |
---|
2868 | for atm in Atseq: |
---|
2869 | for iat,atom in enumerate(Natoms): |
---|
2870 | if atom[1] == atm: |
---|
2871 | nat += 1 |
---|
2872 | atcode = Atcodes[iat].split(':') |
---|
2873 | cell = [0,0,0] |
---|
2874 | if '+' in atcode[1]: |
---|
2875 | cell = eval(atcode[1].split('+')[1]) |
---|
2876 | fl.write('%6d%4s [%s]%19.15f%19.15f%19.15f%6d%4d%4d%4d\n'%( |
---|
2877 | nat,atom[1].strip(),atcode[0],atom[3],atom[4],atom[5],(iat)%Natm+1,cell[0],cell[1],cell[2])) |
---|
2878 | fl.close() |
---|
2879 | return fname,reset |
---|
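# Illustrative call for MakeRMC6f; it writes <Name>.rmc6f and returns the file name
# plus a reset flag that is True only when vacancies ('Va') had to be added to
# RMCPdict['atSeq']:
#
#   rmc6fFile, reset = MakeRMC6f(PWDdata, 'mysample', Phase, RMCPdict)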
2880 | |
---|
2881 | def MakeBragg(PWDdata,Name,Phase): |
---|
2882 | generalData = Phase['General'] |
---|
2883 | Vol = generalData['Cell'][7] |
---|
2884 | Data = PWDdata['Data'] |
---|
2885 | Inst = PWDdata['Instrument Parameters'][0] |
---|
2886 | Bank = int(Inst['Bank'][1]) |
---|
2887 | Sample = PWDdata['Sample Parameters'] |
---|
2888 | Scale = Sample['Scale'][0] |
---|
2889 | Limits = PWDdata['Limits'][1] |
---|
2890 | Ibeg = np.searchsorted(Data[0],Limits[0]) |
---|
2891 | Ifin = np.searchsorted(Data[0],Limits[1])+1 |
---|
2892 | fname = Name+'.bragg' |
---|
2893 | fl = open(fname,'w') |
---|
2894 | fl.write('%12d%6d%15.7f%15.4f\n'%(Ifin-Ibeg-2,Bank,Scale,Vol)) |
---|
2895 | if 'T' in Inst['Type'][0]: |
---|
2896 | fl.write('%12s%12s\n'%(' TOF,ms',' I(obs)')) |
---|
2897 | for i in range(Ibeg,Ifin-1): |
---|
2898 | fl.write('%12.8f%12.6f\n'%(Data[0][i]/1000.,Data[1][i])) |
---|
2899 | else: |
---|
2900 | fl.write('%12s%12s\n'%(' 2-theta, deg',' I(obs)')) |
---|
2901 | for i in range(Ibeg,Ifin-1): |
---|
2902 | fl.write('%11.6f%15.2f\n'%(Data[0][i],Data[1][i])) |
---|
2903 | fl.close() |
---|
2904 | return fname |
---|
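# Illustrative call for MakeBragg; it writes the observed pattern between the data
# limits to <Name>.bragg for RMCProfile and returns the file name:
#
#   braggFile = MakeBragg(PWDdata, 'mysample', Phase)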
2905 | |
---|
2906 | def MakeRMCPdat(PWDdata,Name,Phase,RMCPdict): |
---|
2907 | Meta = RMCPdict['metadata'] |
---|
2908 | Times = RMCPdict['runTimes'] |
---|
2909 | Atseq = RMCPdict['atSeq'] |
---|
2910 | Natoms = RMCPdict['NoAtoms'] |
---|
2911 | sumatms = np.sum(np.array([Natoms[iatm] for iatm in Natoms])) |
---|
2912 | Isotope = RMCPdict['Isotope'] |
---|
2913 | Isotopes = RMCPdict['Isotopes'] |
---|
2914 | Atypes = RMCPdict['aTypes'] |
---|
2915 | if 'Va' in Atypes: |
---|
2916 | Isotope['Va'] = 'Nat. Abund.' |
---|
2917 | Isotopes['Va'] = {'Nat. Abund.':{'SL':[0.0,0.0]}} |
---|
2918 | atPairs = RMCPdict['Pairs'] |
---|
2919 | Files = RMCPdict['files'] |
---|
2920 | BraggWt = RMCPdict['histogram'][1] |
---|
2921 | inst = PWDdata['Instrument Parameters'][0] |
---|
2922 | try: |
---|
2923 | pName = Phase['General']['Name'] |
---|
2924 | refList = PWDdata['Reflection Lists'][Name]['RefList'] |
---|
2925 | except TypeError: |
---|
2926 | return 'Error - missing reflection list; you must do Refine first' |
---|
2927 | dMin = refList[-1][4] |
---|
2928 | gsasType = 'xray2' |
---|
2929 | if 'T' in inst['Type'][1]: |
---|
2930 | gsasType = 'gsas3' |
---|
2931 | elif 'X' in inst['Type'][1]: |
---|
2932 | XFF = G2elem.GetFFtable(Atseq) |
---|
2933 | Xfl = open(Name+'.xray','w') |
---|
2934 | for atm in Atseq: |
---|
2935 | fa = XFF[atm]['fa'] |
---|
2936 | fb = XFF[atm]['fb'] |
---|
2937 | fc = XFF[atm]['fc'] |
---|
2938 | Xfl.write('%2s %8.4f%8.4f%8.4f%8.4f%8.4f%8.4f%8.4f%8.4f%8.4f\n'%( |
---|
2939 | atm.upper(),fa[0],fb[0],fa[1],fb[1],fa[2],fb[2],fa[3],fb[3],fc)) |
---|
2940 | Xfl.close() |
---|
2941 | lenA = len(Atseq) |
---|
2942 | Pairs = [] |
---|
2943 | Ncoeff = [] |
---|
2944 | Nblen = [Isotopes[at][Isotope[at]]['SL'][0] for at in Atypes] |
---|
2945 | for pair in [[' %s-%s'%(Atseq[i],Atseq[j]) for j in range(i,lenA)] for i in range(lenA)]: |
---|
2946 | Pairs += pair |
---|
2947 | for pair in Pairs: |
---|
2948 | pair = pair.replace(' ','') |
---|
2949 | at1,at2 = pair.split('-') |
---|
2950 | if at1 == 'Va' or at2 == 'Va': |
---|
2951 | ncoef = 0.0 |
---|
2952 | else: |
---|
2953 | ncoef = Isotopes[at1][Isotope[at1]]['SL'][0]*Natoms[at1]/sumatms |
---|
2954 | ncoef *= Isotopes[at2][Isotope[at2]]['SL'][0]*Natoms[at2]/sumatms |
---|
2955 | if at1 != at2: |
---|
2956 | ncoef *= 2. |
---|
2957 | Ncoeff += [ncoef,] |
---|
2958 | pairMin = [atPairs[pair] if pair in atPairs else [0.0,0.,0.] for pair in Pairs ] |
---|
2959 | maxMoves = [Atypes[atm] if atm in Atypes else 0.0 for atm in Atseq ] |
---|
2960 | fname = Name+'.dat' |
---|
2961 | fl = open(fname,'w') |
---|
2962 | fl.write(' %% Hand edit the following as needed\n') |
---|
2963 | fl.write('TITLE :: '+Name+'\n') |
---|
2964 | fl.write('MATERIAL :: '+Meta['material']+'\n') |
---|
2965 | fl.write('PHASE :: '+Meta['phase']+'\n') |
---|
2966 | fl.write('TEMPERATURE :: '+str(Meta['temperature'])+'\n') |
---|
2967 | fl.write('INVESTIGATOR :: '+Meta['owner']+'\n') |
---|
2968 | if RMCPdict.get('useGPU',False): |
---|
2969 | fl.write('GPU_ACCELERATOR :: 0\n') |
---|
2970 | minHD = ' '.join(['%6.3f'%dist[0] for dist in pairMin]) |
---|
2971 | minD = ' '.join(['%6.3f'%dist[1] for dist in pairMin]) |
---|
2972 | maxD = ' '.join(['%6.3f'%dist[2] for dist in pairMin]) |
---|
2973 | fl.write('MINIMUM_DISTANCES :: %s Angstrom\n'%minHD) |
---|
2974 | maxMv = ' '.join(['%6.3f'%mov for mov in maxMoves]) |
---|
2975 | fl.write('MAXIMUM_MOVES :: %s Angstrom\n'%maxMv) |
---|
2976 | fl.write('R_SPACING :: 0.0200 Angstrom\n') |
---|
2977 | fl.write('PRINT_PERIOD :: 100\n') |
---|
2978 | fl.write('TIME_LIMIT :: %.2f MINUTES\n'%Times[0]) |
---|
2979 | fl.write('SAVE_PERIOD :: %.2f MINUTES\n'%Times[1]) |
---|
2980 | fl.write('\n') |
---|
2981 | fl.write('ATOMS :: '+' '.join(Atseq)+'\n') |
---|
2982 | fl.write('\n') |
---|
2983 | fl.write('FLAGS ::\n') |
---|
2984 | fl.write(' > NO_MOVEOUT\n') |
---|
2985 | fl.write(' > NO_SAVE_CONFIGURATIONS\n') |
---|
2986 | fl.write(' > NO_RESOLUTION_CONVOLUTION\n') |
---|
2987 | fl.write('\n') |
---|
2988 | fl.write('INPUT_CONFIGURATION_FORMAT :: rmc6f\n') |
---|
2989 | fl.write('SAVE_CONFIGURATION_FORMAT :: rmc6f\n') |
---|
2990 | fl.write('IGNORE_HISTORY_FILE ::\n') |
---|
2991 | fl.write('\n') |
---|
2992 | if 'T' in inst['Type'][1]: |
---|
2993 | fl.write('NEUTRON_COEFFICIENTS :: '+''.join(['%9.5f'%coeff for coeff in Ncoeff])+'\n') |
---|
2994 | fl.write('DISTANCE_WINDOW ::\n') |
---|
2995 | fl.write(' > MNDIST :: %s\n'%minD) |
---|
2996 | fl.write(' > MXDIST :: %s\n'%maxD) |
---|
2997 | if len(RMCPdict['Potentials']['Stretch']) or len(RMCPdict['Potentials']['Angles']): |
---|
2998 | fl.write('\n') |
---|
2999 | fl.write('POTENTIALS ::\n') |
---|
3000 | fl.write(' > TEMPERATURE :: %.1f K\n'%RMCPdict['Potentials']['Pot. Temp.']) |
---|
3001 | fl.write(' > PLOT :: pixels=400, colour=red, zangle=90, zrotation=45 deg\n') |
---|
3002 | if len(RMCPdict['Potentials']['Stretch']): |
---|
3003 | fl.write(' > STRETCH_SEARCH :: %.1f%%\n'%RMCPdict['Potentials']['Stretch search']) |
---|
3004 | for bond in RMCPdict['Potentials']['Stretch']: |
---|
3005 | fl.write(' > STRETCH :: %s %s %.2f eV %.2f Ang\n'%(bond[0],bond[1],bond[3],bond[2])) |
---|
3006 | if len(RMCPdict['Potentials']['Angles']): |
---|
3007 | fl.write(' > ANGLE_SEARCH :: %.1f%%\n'%RMCPdict['Potentials']['Angle search']) |
---|
3008 | for angle in RMCPdict['Potentials']['Angles']: |
---|
3009 | fl.write(' > ANGLE :: %s %s %s %.2f eV %.2f deg %.2f %.2f Ang\n'% |
---|
3010 | (angle[1],angle[0],angle[2],angle[6],angle[3],angle[4],angle[5])) |
---|
3011 | if RMCPdict['useBVS']: |
---|
3012 | fl.write('BVS ::\n') |
---|
3013 | fl.write(' > ATOM :: '+' '.join(Atseq)+'\n') |
---|
3014 | fl.write(' > WEIGHTS :: %s\n'%' '.join(['%6.3f'%RMCPdict['BVS'][bvs][2] for bvs in RMCPdict['BVS']])) |
---|
3015 | oxid = [] |
---|
3016 | for val in RMCPdict['Oxid']: |
---|
3017 | if len(val) == 3: |
---|
3018 | oxid.append(val[0][1:]) |
---|
3019 | else: |
---|
3020 | oxid.append(val[0][2:]) |
---|
3021 | fl.write(' > OXID :: %s\n'%' '.join(oxid)) |
---|
3022 | fl.write(' > RIJ :: %s\n'%' '.join(['%6.3f'%RMCPdict['BVS'][bvs][0] for bvs in RMCPdict['BVS']])) |
---|
3023 | fl.write(' > BVAL :: %s\n'%' '.join(['%6.3f'%RMCPdict['BVS'][bvs][1] for bvs in RMCPdict['BVS']])) |
---|
3024 | fl.write(' > CUTOFF :: %s\n'%' '.join(['%6.3f'%RMCPdict['BVS'][bvs][2] for bvs in RMCPdict['BVS']])) |
---|
3025 | fl.write(' > SAVE :: 100000\n') |
---|
3026 | fl.write(' > UPDATE :: 100000\n') |
---|
3027 | if len(RMCPdict['Swaps']): |
---|
3028 | fl.write('\n') |
---|
3029 | fl.write('SWAP_MULTI ::\n') |
---|
3030 | for swap in RMCPdict['Swaps']: |
---|
3031 | try: |
---|
3032 | at1 = Atseq.index(swap[0]) |
---|
3033 | at2 = Atseq.index(swap[1]) |
---|
3034 | except ValueError: |
---|
3035 | break |
---|
3036 | fl.write(' > SWAP_ATOMS :: %d %d %.2f\n'%(at1,at2,swap[2])) |
---|
3037 | |
---|
3038 | if len(RMCPdict['FxCN']): |
---|
3039 | fl.write('FIXED_COORDINATION_CONSTRAINTS :: %d\n'%len(RMCPdict['FxCN'])) |
---|
3040 | for ifx,fxcn in enumerate(RMCPdict['FxCN']): |
---|
3041 | try: |
---|
3042 | at1 = Atseq.index(fxcn[0]) |
---|
3043 | at2 = Atseq.index(fxcn[1]) |
---|
3044 | except ValueError: |
---|
3045 | break |
---|
3046 | fl.write(' > CSTR%d :: %d %d %.2f %.2f %.2f %.2f %.6f\n'%(ifx+1,at1+1,at2+1,fxcn[2],fxcn[3],fxcn[4],fxcn[5],fxcn[6])) |
---|
3047 | if len(RMCPdict['AveCN']): |
---|
3048 | fl.write('AVERAGE_COORDINATION_CONSTRAINTS :: %d\n'%len(RMCPdict['AveCN'])) |
---|
3049 | for iav,avcn in enumerate(RMCPdict['AveCN']): |
---|
3050 | try: |
---|
3051 | at1 = Atseq.index(avcn[0]) |
---|
3052 | at2 = Atseq.index(avcn[1]) |
---|
3053 | except ValueError: |
---|
3054 | break |
---|
3055 | fl.write(' > CAVSTR%d :: %d %d %.2f %.2f %.2f %.6f\n'%(iav+1,at1+1,at2+1,avcn[2],avcn[3],avcn[4],avcn[5])) |
---|
3056 | for File in Files: |
---|
3057 | if Files[File][0] and Files[File][0] != 'Select': |
---|
3058 | if 'Xray' in File and 'F(Q)' in File: |
---|
3059 | fqdata = open(Files[File][0],'r') |
---|
3060 | lines = int(fqdata.readline()[:-1]) |
---|
3061 | fqdata.close() |
---|
3062 | fl.write('\n') |
---|
3063 | fl.write('%s ::\n'%File.split(';')[0].upper().replace(' ','_')) |
---|
3064 | fl.write(' > FILENAME :: %s\n'%Files[File][0]) |
---|
3065 | fl.write(' > DATA_TYPE :: %s\n'%Files[File][2]) |
---|
3066 | fl.write(' > FIT_TYPE :: %s\n'%Files[File][2]) |
---|
3067 | if 'Xray' not in File: |
---|
3068 | fl.write(' > START_POINT :: 1\n') |
---|
3069 | fl.write(' > END_POINT :: 3000\n') |
---|
3070 | fl.write(' > WEIGHT :: %.4f\n'%Files[File][1]) |
---|
3071 | fl.write(' > CONSTANT_OFFSET 0.000\n') |
---|
3072 | fl.write(' > NO_FITTED_OFFSET\n') |
---|
3073 | if RMCPdict['FitScale']: |
---|
3074 | fl.write(' > FITTED_SCALE\n') |
---|
3075 | else: |
---|
3076 | fl.write(' > NO_FITTED_SCALE\n') |
---|
3077 | if Files[File][3] !='RMC': |
---|
3078 | fl.write(' > %s\n'%Files[File][3]) |
---|
3079 | if 'reciprocal' in File: |
---|
3080 | fl.write(' > CONVOLVE ::\n') |
---|
3081 | if 'Xray' in File: |
---|
3082 | fl.write(' > RECIPROCAL_SPACE_FIT :: 1 %d 1\n'%lines) |
---|
3083 | fl.write(' > RECIPROCAL_SPACE_PARAMETERS :: 1 %d %.4f\n'%(lines,Files[File][1])) |
---|
3084 | fl.write(' > REAL_SPACE_FIT :: 1 %d 1\n'%(3*lines//2)) |
---|
3085 | fl.write(' > REAL_SPACE_PARAMETERS :: 1 %d %.4f\n'%(3*lines//2,1./Files[File][1])) |
---|
3086 | fl.write('\n') |
---|
3087 | fl.write('BRAGG ::\n') |
---|
3088 | fl.write(' > BRAGG_SHAPE :: %s\n'%gsasType) |
---|
3089 | fl.write(' > RECALCUATE\n') |
---|
3090 | fl.write(' > DMIN :: %.2f\n'%(dMin-0.02)) |
---|
3091 | fl.write(' > WEIGHT :: %10.3f\n'%BraggWt) |
---|
3092 | if 'T' in inst['Type'][1]: |
---|
3093 | fl.write(' > SCATTERING LENGTH :: '+''.join(['%8.4f'%blen for blen in Nblen])+'\n') |
---|
3094 | fl.write('\n') |
---|
3095 | fl.write('END ::\n') |
---|
3096 | fl.close() |
---|
3097 | return fname |
---|
3098 | |
---|
3099 | # def FindBonds(Phase,RMCPdict): |
---|
3100 | # generalData = Phase['General'] |
---|
3101 | # cx,ct,cs,cia = generalData['AtomPtrs'] |
---|
3102 | # atomData = Phase['Atoms'] |
---|
3103 | # Res = 'RMC' |
---|
3104 | # if 'macro' in generalData['Type']: |
---|
3105 | # Res = atomData[0][ct-3] |
---|
3106 | # AtDict = {atom[ct-1]:atom[ct] for atom in atomData} |
---|
3107 | # Pairs = RMCPdict['Pairs'] #dict! |
---|
3108 | # BondList = [] |
---|
3109 | # notNames = [] |
---|
3110 | # for FrstName in AtDict: |
---|
3111 | # nbrs = G2mth.FindAllNeighbors(Phase,FrstName,list(AtDict.keys()),notName=notNames,Short=True)[0] |
---|
3112 | # Atyp1 = AtDict[FrstName] |
---|
3113 | # if 'Va' in Atyp1: |
---|
3114 | # continue |
---|
3115 | # for nbr in nbrs: |
---|
3116 | # Atyp2 = AtDict[nbr[0]] |
---|
3117 | # if 'Va' in Atyp2: |
---|
3118 | # continue |
---|
3119 | # try: |
---|
3120 | # bndData = Pairs[' %s-%s'%(Atyp1,Atyp2)][1:] |
---|
3121 | # except KeyError: |
---|
3122 | # bndData = Pairs[' %s-%s'%(Atyp2,Atyp1)][1:] |
---|
3123 | # if any(bndData): |
---|
3124 | # if bndData[0] <= nbr[1] <= bndData[1]: |
---|
3125 | # bondStr = str((FrstName,nbr[0])+tuple(bndData))+',\n' |
---|
3126 | # revbondStr = str((nbr[0],FrstName)+tuple(bndData))+',\n' |
---|
3127 | # if bondStr not in BondList and revbondStr not in BondList: |
---|
3128 | # BondList.append(bondStr) |
---|
3129 | # notNames.append(FrstName) |
---|
3130 | # return Res,BondList |
---|
3131 | |
---|
3132 | # def FindAngles(Phase,RMCPdict): |
---|
3133 | # generalData = Phase['General'] |
---|
3134 | # Cell = generalData['Cell'][1:7] |
---|
3135 | # Amat = G2lat.cell2AB(Cell)[0] |
---|
3136 | # cx,ct,cs,cia = generalData['AtomPtrs'] |
---|
3137 | # atomData = Phase['Atoms'] |
---|
3138 | # AtLookup = G2mth.FillAtomLookUp(atomData,cia+8) |
---|
3139 | # AtDict = {atom[ct-1]:atom[ct] for atom in atomData} |
---|
3140 | # Angles = RMCPdict['Angles'] |
---|
3141 | # AngDict = {'%s-%s-%s'%(angle[0],angle[1],angle[2]):angle[3:] for angle in Angles} |
---|
3142 | # AngleList = [] |
---|
3143 | # for MidName in AtDict: |
---|
3144 | # nbrs,nbrIds = G2mth.FindAllNeighbors(Phase,MidName,list(AtDict.keys()),Short=True) |
---|
3145 | # if len(nbrs) < 2: #need 2 neighbors to make an angle |
---|
3146 | # continue |
---|
3147 | # Atyp2 = AtDict[MidName] |
---|
3148 | # for i,nbr1 in enumerate(nbrs): |
---|
3149 | # Atyp1 = AtDict[nbr1[0]] |
---|
3150 | # for j,nbr3 in enumerate(nbrs[i+1:]): |
---|
3151 | # Atyp3 = AtDict[nbr3[0]] |
---|
3152 | # IdList = [nbrIds[1][i],nbrIds[0],nbrIds[1][i+j+1]] |
---|
3153 | # try: |
---|
3154 | # angData = AngDict['%s-%s-%s'%(Atyp1,Atyp2,Atyp3)] |
---|
3155 | # except KeyError: |
---|
3156 | # try: |
---|
3157 | # angData = AngDict['%s-%s-%s'%(Atyp3,Atyp2,Atyp1)] |
---|
3158 | # except KeyError: |
---|
3159 | # continue |
---|
3160 | # XYZ = np.array(G2mth.GetAtomItemsById(atomData,AtLookup,IdList,cx,numItems=3)) |
---|
3161 | # calAngle = G2mth.getRestAngle(XYZ,Amat) |
---|
3162 | # if angData[0] <= calAngle <= angData[1]: |
---|
3163 | # angStr = str((MidName,nbr1[0],nbr3[0])+tuple(angData))+',\n' |
---|
3164 | # revangStr = str((MidName,nbr3[0],nbr1[0])+tuple(angData))+',\n' |
---|
3165 | # if angStr not in AngleList and revangStr not in AngleList: |
---|
3166 | # AngleList.append(angStr) |
---|
3167 | # return AngleList |
---|
3168 | |
---|
3169 | # def GetSqConvolution(XY,d): |
---|
3170 | |
---|
3171 | # n = XY.shape[1] |
---|
3172 | # snew = np.zeros(n) |
---|
3173 | # dq = np.zeros(n) |
---|
3174 | # sold = XY[1] |
---|
3175 | # q = XY[0] |
---|
3176 | # dq[1:] = np.diff(q) |
---|
3177 | # dq[0] = dq[1] |
---|
3178 | |
---|
3179 | # for j in range(n): |
---|
3180 | # for i in range(n): |
---|
3181 | # b = abs(q[i]-q[j]) |
---|
3182 | # t = q[i]+q[j] |
---|
3183 | # if j == i: |
---|
3184 | # snew[j] += q[i]*sold[i]*(d-np.sin(t*d)/t)*dq[i] |
---|
3185 | # else: |
---|
3186 | # snew[j] += q[i]*sold[i]*(np.sin(b*d)/b-np.sin(t*d)/t)*dq[i] |
---|
3187 | # snew[j] /= np.pi*q[j] |
---|
3188 | |
---|
3189 | # snew[0] = snew[1] |
---|
3190 | # return snew |
---|
3191 | |
---|
3192 | # def GetMaxSphere(pdbName): |
---|
3193 | # try: |
---|
3194 | # pFil = open(pdbName,'r') |
---|
3195 | # except FileNotFoundError: |
---|
3196 | # return None |
---|
3197 | # while True: |
---|
3198 | # line = pFil.readline() |
---|
3199 | # if 'Boundary' in line: |
---|
3200 | # line = line.split()[3:] |
---|
3201 | # G = np.array([float(item) for item in line]) |
---|
3202 | # G = np.reshape(G,(3,3))**2 |
---|
3203 | # G = nl.inv(G) |
---|
3204 | # pFil.close() |
---|
3205 | # break |
---|
3206 | # dspaces = [0.5/np.sqrt(G2lat.calc_rDsq2(H,G)) for H in np.eye(3)] |
---|
3207 | # return min(dspaces) |
---|
3208 | |
---|
3209 | def findfullrmc(): |
---|
3210 | '''Find where fullrmc is installed. Tries the following: |
---|
3211 | |
---|
3212 | 1. Returns the Config var 'fullrmc_exec', if defined. If an executable |
---|
3213 | is found at that location it is assumed to run and supply |
---|
3214 | fullrmc 5.0+ |
---|
3215 | 2. The path is checked for a fullrmc image as named by Bachir |
---|
3216 | |
---|
3217 | :returns: the full path to a python executable that is assumed to |
---|
3218 | have fullrmc installed or None, if it was not found. |
---|
3219 | ''' |
---|
3220 | fullrmc_exe = GSASIIpath.GetConfigValue('fullrmc_exec') |
---|
3221 | if fullrmc_exe is not None and is_exe(fullrmc_exe): |
---|
3222 | return fullrmc_exe |
---|
3223 | pathlist = os.environ["PATH"].split(os.pathsep) |
---|
3224 | for p in (GSASIIpath.path2GSAS2,GSASIIpath.binaryPath,os.getcwd(), |
---|
3225 | os.path.split(sys.executable)[0]): |
---|
3226 | if p not in pathlist: pathlist.append(p) |
---|
3227 | import glob |
---|
3228 | for p in pathlist: |
---|
3229 | if sys.platform == "win32": |
---|
3230 | lookfor = "fullrmc5*.exe" |
---|
3231 | else: |
---|
3232 | lookfor = "fullrmc5*64bit" |
---|
3233 | fl = glob.glob(os.path.join(p,lookfor)) |
---|
3234 | if len(fl) > 0: |
---|
3235 | fullrmc_exe = os.path.abspath(sorted(fl)[0]) |
---|
3236 | if GSASIIpath.GetConfigValue('debug'): |
---|
3237 | print('fullrmc found as',fullrmc_exe) |
---|
3238 | return fullrmc_exe |
---|
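# Usage sketch for findfullrmc (the script name below is hypothetical; subp is the
# subprocess module imported at the top of this file):
#
#   fullrmc_exe = findfullrmc()
#   if fullrmc_exe is None:
#       print('fullrmc not found; fullrmcDownload() can be used to install it')
#   else:
#       subp.Popen([fullrmc_exe, 'myproject-fullrmc.py'])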
3239 | |
---|
3240 | def fullrmcDownload(): |
---|
3241 | '''Downloads the fullrmc executable from Bachir's site to the current |
---|
3242 | GSAS-II binary directory. |
---|
3243 | |
---|
3244 | Does some error checking. |
---|
3245 | ''' |
---|
3246 | import os |
---|
3247 | import requests |
---|
3248 | import platform |
---|
3249 | if platform.architecture()[0] != '64bit': |
---|
3250 | return "fullrmc is only available for 64 bit machines. This is 32 bit" |
---|
3251 | setXbit = True |
---|
3252 | if sys.platform == "darwin": |
---|
3253 | URL = "https://github.com/bachiraoun/fullrmc/raw/master/standalones/fullrmc500_3p8p6_macOS-10p16-x86_64-i386-64bit" |
---|
3254 | elif sys.platform == "win32": |
---|
3255 | setXbit = False |
---|
3256 | URL = "https://github.com/bachiraoun/fullrmc/raw/master/standalones/fullrmc500_3p8p10_Windows-10-10p0p19041-SP0.exe" |
---|
3257 | else: |
---|
3258 | if 'aarch' in platform.machine() or 'arm' in platform.machine(): |
---|
3259 | return "Sorry, fullrmc is only available for Intel-compatible machines." |
---|
3260 | URL = "https://github.com/bachiraoun/fullrmc/raw/master/standalones/fullrmc500_3p8p5_Linux-4p19p121-linuxkit-x86_64-with-glibc2p29" |
---|
3261 | |
---|
3262 | GSASIIpath.SetBinaryPath() |
---|
3263 | fil = os.path.join(GSASIIpath.binaryPath,os.path.split(URL)[1]) |
---|
3264 | print('Starting installation of fullrmc\nDownloading from', |
---|
3265 | 'https://github.com/bachiraoun/fullrmc/tree/master/standalones', |
---|
3266 | '\nCreating '+fil, |
---|
3267 | '\nThis may take a while...') |
---|
3268 | open(fil, "wb").write(requests.get(URL).content) |
---|
3269 | print('...Download completed') |
---|
3270 | if setXbit: |
---|
3271 | import stat |
---|
3272 | os.chmod(fil, os.stat(fil).st_mode | stat.S_IEXEC) |
---|
3273 | return '' |
---|
3274 | |
---|
3275 | def findPDFfit(): |
---|
3276 | '''Find if PDFfit2 is installed (may be local to GSAS-II). |
---|
3277 | :returns: two items: (1) the full path to a python executable or None, if |
---|
3278 | it was not found and (2) path(s) to the PDFfit2 location(s) as a list. |
---|
3279 | |
---|
3280 | ''' |
---|
3281 | if GSASIIpath.GetConfigValue('pdffit2_exec') is not None and is_exe( |
---|
3282 | GSASIIpath.GetConfigValue('pdffit2_exec')): |
---|
3283 | return GSASIIpath.GetConfigValue('pdffit2_exec'),None |
---|
3284 | pdffitloc = os.path.join(GSASIIpath.path2GSAS2,'PDFfit2') |
---|
3285 | if not os.path.exists(pdffitloc): |
---|
3286 | print('PDFfit2 not found in GSAS-II \n\t(expected in '+pdffitloc+')') |
---|
3287 | return None,[] |
---|
3288 | if pdffitloc not in sys.path: sys.path.append(pdffitloc) |
---|
3289 | try: |
---|
3290 | from diffpy.pdffit2 import PdfFit |
---|
3291 | import diffpy |
---|
3292 | import inspect |
---|
3293 | pdffitloc = [os.path.dirname(os.path.dirname(inspect.getfile(diffpy)))] |
---|
3294 | # is this the original version of diffpy (w/pdffit2.py) |
---|
3295 | try: |
---|
3296 | from diffpy.pdffit2 import pdffit2 |
---|
3297 | except ImportError: |
---|
3298 | # or the GSAS-II version w/o; for this we need to find the binary's location |
---|
3299 | try: |
---|
3300 | import pdffit2 # added for GSAS-II to relocate binary file |
---|
3301 | except ImportError: |
---|
3302 | print('\nError: pdffit2 failed to load with this python\n') |
---|
3303 | return None,[] |
---|
3304 | except ModuleNotFoundError: |
---|
3305 | print('\nGSAS-II does not have a PDFfit2 module compatible\nwith this Python interpreter\n') |
---|
3306 | return None,[] |
---|
3307 | pdffitloc += [os.path.dirname(inspect.getfile(pdffit2))] |
---|
3308 | return sys.executable,pdffitloc |
---|
3309 | except Exception as msg: |
---|
3310 | print('Error importing PDFfit2:\n',msg) |
---|
3311 | return None,[] |
---|
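# Usage sketch for findPDFfit, mirroring how it is unpacked in MakePDFfitRunFile
# below:
#
#   PDFfit_exe, PDFfit_path = findPDFfit()   # python executable (or None), path list
#   if not PDFfit_exe:
#       print('PDFfit2 is not available to this GSAS-II installation')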
3312 | |
---|
3313 | def GetPDFfitAtomVar(Phase,RMCPdict): |
---|
3314 | ''' Find dict of independent "@n" variables for PDFfit in atom constraints |
---|
3315 | ''' |
---|
3316 | General = Phase['General'] |
---|
3317 | Atoms = Phase['Atoms'] |
---|
3318 | cx,ct,cs,cia = General['AtomPtrs'] |
---|
3319 | AtomVar = RMCPdict['AtomVar'] |
---|
3320 | varnames = [] |
---|
3321 | for iat,atom in enumerate(RMCPdict['AtomConstr']): |
---|
3322 | for it,item in enumerate(atom): |
---|
3323 | if it > 1 and item: |
---|
3324 | itms = item.split('@') |
---|
3325 | for itm in itms[1:]: |
---|
3326 | itnum = itm[:2] |
---|
3327 | varname = '@%s'%itnum |
---|
3328 | varnames.append(varname) |
---|
3329 | if it < 6: |
---|
3330 | if varname not in AtomVar: |
---|
3331 | AtomVar[varname] = 0.0 #put ISODISTORT mode displ here? |
---|
3332 | else: |
---|
3333 | for i in range(3): |
---|
3334 | if varname not in AtomVar: |
---|
3335 | AtomVar[varname] = Atoms[iat][cia+i+2] |
---|
3336 | varnames = set(varnames) |
---|
3337 | for name in list(AtomVar.keys()): #clear out unused parameters |
---|
3338 | if name not in varnames: |
---|
3339 | del AtomVar[name] |
---|
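# GetPDFfitAtomVar scans the '@nn' tokens in RMCPdict['AtomConstr'] and makes sure
# each appears in RMCPdict['AtomVar']; unused entries are deleted. As an illustration
# (the constraint-string syntax shown is a plausible example, not a specification):
# a coordinate constraint such as '0.25+@21' registers '@21', initialized to 0.0 for
# coordinates/occupancy or to the atom's current Uii values for the ADP terms.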
3340 | |
---|
3341 | def MakePDFfitAtomsFile(Phase,RMCPdict): |
---|
3342 | '''Make the PDFfit atoms file |
---|
3343 | ''' |
---|
3344 | General = Phase['General'] |
---|
3345 | if General['SGData']['SpGrp'] != 'P 1': |
---|
3346 | return 'Space group symmetry must be lowered to P 1 for PDFfit' |
---|
3347 | fName = General['Name']+'-PDFfit.stru' |
---|
3348 | fName = fName.replace(' ','_') |
---|
3349 | if 'sequential' in RMCPdict['refinement']: |
---|
3350 | fName = 'Sequential_PDFfit.stru' |
---|
3351 | fatm = open(fName,'w') |
---|
3352 | fatm.write('title structure of '+General['Name']+'\n') |
---|
3353 | fatm.write('format pdffit\n') |
---|
3354 | fatm.write('scale 1.000000\n') #fixed |
---|
3355 | sharp = '%10.6f,%10.6f,%10.6f,%10.6f\n'%(RMCPdict['delta2'][0],RMCPdict['delta1'][0],RMCPdict['sratio'][0],RMCPdict['rcut']) |
---|
3356 | fatm.write('sharp '+sharp) |
---|
3357 | shape = '' |
---|
3358 | if RMCPdict['shape'] == 'sphere' and RMCPdict['spdiameter'][0] > 0.: |
---|
3359 | shape = ' sphere, %10.6f\n'%RMCPdict['spdiameter'][0] |
---|
3360 | elif RMCPdict['stepcut'] > 0.: |
---|
3361 | shape = 'stepcut, %10.6f\n'%RMCPdict['stepcut'] |
---|
3362 | if shape: |
---|
3363 | fatm.write('shape '+shape) |
---|
3364 | fatm.write('spcgr %s\n'%RMCPdict['SGData']['SpGrp'].replace(' ','')) |
---|
3365 | cell = General['Cell'][1:7] |
---|
3366 | fatm.write('cell %10.6f,%10.6f,%10.6f,%10.6f,%10.6f,%10.6f\n'%( |
---|
3367 | cell[0],cell[1],cell[2],cell[3],cell[4],cell[5])) |
---|
3368 | fatm.write('dcell '+5*' 0.000000,'+' 0.000000\n') |
---|
3369 | Atoms = Phase['Atoms'] |
---|
3370 | fatm.write('ncell %8d,%8d,%8d,%10d\n'%(1,1,1,len(Atoms))) |
---|
3371 | fatm.write('atoms\n') |
---|
3372 | cx,ct,cs,cia = General['AtomPtrs'] |
---|
3373 | for atom in Atoms: |
---|
3374 | fatm.write('%4s%18.8f%18.8f%18.8f%13.4f\n'%(atom[ct][:2].ljust(2),atom[cx],atom[cx+1],atom[cx+2],atom[cx+3])) |
---|
3375 | fatm.write(' '+'%18.8f%18.8f%18.8f%13.4f\n'%(0.,0.,0.,0.)) |
---|
3376 | fatm.write(' '+'%18.8f%18.8f%18.8f\n'%(atom[cia+2],atom[cia+3],atom[cia+4])) |
---|
3377 | fatm.write(' '+'%18.8f%18.8f%18.8f\n'%(0.,0.,0.,)) |
---|
3378 | fatm.write(' '+'%18.8f%18.8f%18.8f\n'%(atom[cia+5],atom[cia+6],atom[cia+7])) |
---|
3379 | fatm.write(' '+'%18.8f%18.8f%18.8f\n'%(0.,0.,0.)) |
---|
3380 | fatm.close() |
---|
3381 | |
---|
3382 | def MakePDFfitRunFile(Phase,RMCPdict): |
---|
3383 | '''Make the PDFfit python run file |
---|
3384 | ''' |
---|
3385 | |
---|
3386 | def GetCellConstr(SGData): |
---|
3387 | if SGData['SGLaue'] in ['m3', 'm3m']: |
---|
3388 | return [1,1,1,0,0,0] |
---|
3389 | elif SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm','4/m','4/mmm']: |
---|
3390 | return [1,1,2,0,0,0] |
---|
3391 | elif SGData['SGLaue'] in ['3R','3mR']: |
---|
3392 | return [1,1,1,2,2,2] |
---|
3393 | elif SGData['SGLaue'] == 'mmm': |
---|
3394 | return [1,2,3,0,0,0] |
---|
3395 | elif SGData['SGLaue'] == '2/m': |
---|
3396 | if SGData['SGUniq'] == 'a': |
---|
3397 | return [1,2,3,4,0,0] |
---|
3398 | elif SGData['SGUniq'] == 'b': |
---|
3399 | return [1,2,3,0,4,0] |
---|
3400 | elif SGData['SGUniq'] == 'c': |
---|
3401 | return [1,2,3,0,0,4] |
---|
3402 | else: |
---|
3403 | return [1,2,3,4,5,6] |
---|
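# GetCellConstr returns six integer codes for (a,b,c,alpha,beta,gamma): equal nonzero
# codes tie those cell terms to the same '@' parameter and 0 leaves the term fixed.
# For example, cubic [1,1,1,0,0,0] refines a single length with a=b=c and all angles
# fixed, while rhombohedral [1,1,1,2,2,2] refines one length and one angle.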
3404 | |
---|
3405 | General = Phase['General'] |
---|
3406 | Cell = General['Cell'][1:7] |
---|
3407 | rundata = '''#!/usr/bin/env python |
---|
3408 | # -*- coding: utf-8 -*- |
---|
3409 | import sys,os |
---|
3410 | datadir = r'{:}' |
---|
3411 | pathWrap = lambda f: os.path.join(datadir,f) |
---|
3412 | '''.format(os.path.abspath(os.getcwd())) |
---|
3413 | PDFfit_exe,PDFfit_path = findPDFfit() # returns python loc and path(s) for pdffit |
---|
3414 | if not PDFfit_exe: |
---|
3415 | print('PDFfit2 is not found. Creating the run script without PDFfit2 path additions.') |
---|
3416 | if PDFfit_path: |
---|
3417 | for p in PDFfit_path: |
---|
3418 | rundata += "sys.path.append(r'{:}')\n".format(p) |
---|
3419 | rundata += 'from diffpy.pdffit2 import PdfFit\n' |
---|
3420 | rundata += 'pf = PdfFit()\n' |
---|
3421 | Nd = 0 |
---|
3422 | Np = 0 |
---|
3423 | parms = {} |
---|
3424 | parmNames = {} |
---|
3425 | if 'sequential' in RMCPdict['refinement']: |
---|
3426 | Np = 3 |
---|
3427 | rundata += '#sequential data here\n' |
---|
3428 | else: |
---|
3429 | for fil in RMCPdict['files']: |
---|
3430 | filNam = RMCPdict['files'][fil][0] |
---|
3431 | if 'Select' in filNam: |
---|
3432 | continue |
---|
3433 | if 'Neutron' in fil: |
---|
3434 | Nd += 1 |
---|
3435 | dType = 'Ndata' |
---|
3436 | else: |
---|
3437 | Nd += 1 |
---|
3438 | dType = 'Xdata' |
---|
3439 | rundata += "pf.read_data(pathWrap(r'%s'), '%s', 30.0, %.4f)\n"%(filNam,dType[0],RMCPdict[dType]['qdamp'][0]) |
---|
3440 | rundata += 'pf.setdata(%d)\n'%Nd |
---|
3441 | rundata += 'pf.pdfrange(%d, %6.2f, %6.2f)\n'%(Nd,RMCPdict[dType]['Fitrange'][0],RMCPdict[dType]['Fitrange'][1]) |
---|
3442 | for item in ['dscale','qdamp','qbroad']: |
---|
3443 | if RMCPdict[dType][item][1]: |
---|
3444 | Np += 1 |
---|
3445 | rundata += 'pf.constrain(pf.%s(),"@%d")\n'%(item,Np) |
---|
3446 | parms[Np] = RMCPdict[dType][item][0] |
---|
3447 | parmNames[Np] = item |
---|
3448 | fName = General['Name']+'-PDFfit.stru' |
---|
3449 | fName = fName.replace(' ','_') |
---|
3450 | if 'sequential' in RMCPdict['refinement']: |
---|
3451 | fName = 'Sequential_PDFfit.stru' |
---|
3452 | Np = 9 |
---|
3453 | rundata += "pf.read_struct(pathWrap(r'{:}'))\n".format(fName) |
---|
3454 | for item in ['delta1','delta2','sratio']: |
---|
3455 | if RMCPdict[item][1]: |
---|
3456 | Np += 1 |
---|
3457 | rundata += 'pf.constrain(pf.%s,"@%d")\n'%(item,Np) |
---|
3458 | parms[Np] = RMCPdict[item][0] |
---|
3459 | parmNames[Np] = item |
---|
3460 | if 'sphere' in RMCPdict['shape'] and RMCPdict['spdiameter'][1]: |
---|
3461 | Np += 1 |
---|
3462 | rundata += 'pf.constrain(pf.spdiameter,"@%d")\n'%Np |
---|
3463 | parms[Np] = RMCPdict['spdiameter'][0] |
---|
3464 | parmNames[Np] = 'spdiameter' |
---|
3465 | |
---|
3466 | if RMCPdict['cellref']: |
---|
3467 | cellconst = GetCellConstr(RMCPdict['SGData']) |
---|
3468 | used = [] |
---|
3469 | cellNames = ['a','b','c','alpha','beta','gamma'] |
---|
3470 | for ic in range(6): |
---|
3471 | if cellconst[ic]: |
---|
3472 | rundata += 'pf.constrain(pf.lat(%d), "@%d")\n'%(ic+1,Np+cellconst[ic]) |
---|
3473 | if cellconst[ic] not in used: |
---|
3474 | parms[Np+cellconst[ic]] = Cell[ic] |
---|
3475 | parmNames[Np+cellconst[ic]] = cellNames[ic] |
---|
3476 | used.append(cellconst[ic]) |
---|
3477 | #Atom constraints here ------------------------------------------------------- |
---|
3478 | AtomVar = RMCPdict['AtomVar'] |
---|
3479 | used = [] |
---|
3480 | for iat,atom in enumerate(RMCPdict['AtomConstr']): |
---|
3481 | for it,item in enumerate(atom): |
---|
3482 | names = ['pf.x(%d)'%(iat+1),'pf.y(%d)'%(iat+1),'pf.z(%d)'%(iat+1),'pf.occ(%d)'%(iat+1)] |
---|
3483 | if it > 1 and item: |
---|
3484 | itms = item.split('@') |
---|
3485 | once = False |
---|
3486 | for itm in itms[1:]: |
---|
3487 | try: |
---|
3488 | itnum = int(itm[:2]) |
---|
3489 | except ValueError: |
---|
3490 | print(' *** ERROR - invalid string in atom constraint %s ***'%(item)) |
---|
3491 | return None |
---|
3492 | if it < 6: |
---|
3493 | if not once: |
---|
3494 | rundata += 'pf.constrain(%s,"%s")\n'%(names[it-2],item) |
---|
3495 | once = True |
---|
3496 | if itnum not in used: |
---|
3497 | parms[itnum] = AtomVar['@%d'%itnum] |
---|
3498 | parmNames[itnum] = names[it-2].split('.')[1] |
---|
3499 | used.append(itnum) |
---|
3500 | else: |
---|
3501 | uijs = ['pf.u11(%d)'%(iat+1),'pf.u22(%d)'%(iat+1),'pf.u33(%d)'%(iat+1)] |
---|
3502 | for i in range(3): |
---|
3503 | rundata += 'pf.constrain(%s,"%s")\n'%(uijs[i],item) |
---|
3504 | if itnum not in used: |
---|
3505 | parms[itnum] = AtomVar['@%d'%itnum] |
---|
3506 | parmNames[itnum] = uijs[i].split('.')[1] |
---|
3507 | used.append(itnum) |
---|
3508 | |
---|
3509 | if 'sequential' in RMCPdict['refinement']: |
---|
3510 | rundata += '#parameters here\n' |
---|
3511 | RMCPdict['Parms'] = parms #{'n':val,...} |
---|
3512 | RMCPdict['ParmNames'] = parmNames #{'n':name,...} |
---|
3513 | else: |
---|
3514 | # set parameter values |
---|
3515 | for iprm in parms: |
---|
3516 | rundata += 'pf.setpar(%d,%.6f)\n'%(iprm,parms[iprm]) |
---|
3517 | |
---|
3518 | # Save results --------------------------------------------------------------- |
---|
3519 | rundata += 'pf.refine()\n' |
---|
3520 | if 'sequential' in RMCPdict['refinement']: |
---|
3521 | fName = 'Sequential_PDFfit' |
---|
3522 | rfile = open('Seq_PDFfit_template.py','w') |
---|
3523 | rundata += 'pf.save_pdf(1, pathWrap("%s"))\n'%(fName+'.fgr') |
---|
3524 | else: |
---|
3525 | fName = General['Name'].replace(' ','_')+'-PDFfit' |
---|
3526 | rfile = open(fName+'.py','w') |
---|
3527 | Nd = 0 |
---|
3528 | for file in RMCPdict['files']: |
---|
3529 | if 'Select' in RMCPdict['files'][file][0]: #skip unselected |
---|
3530 | continue |
---|
3531 | Nd += 1 |
---|
3532 | rundata += 'pf.save_pdf(%d, pathWrap("%s"))\n'%(Nd,fName+file[0]+'.fgr') |
---|
3533 | |
---|
3534 | rundata += 'pf.save_struct(1, pathWrap("%s"))\n'%(fName+'.rstr') |
---|
3535 | rundata += 'pf.save_res(pathWrap("%s"))\n'%(fName+'.res') |
---|
3536 | |
---|
3537 | rfile.writelines(rundata) |
---|
3538 | rfile.close() |
---|
3539 | |
---|
3540 | return fName+'.py' |
---|
3541 | |
---|
3542 | def GetSeqCell(SGData,parmDict): |
---|
3543 | ''' For use in processing PDFfit sequential results |
---|
3544 | ''' |
---|
3545 | try: |
---|
3546 | if SGData['SGLaue'] in ['m3', 'm3m']: |
---|
3547 | cell = [parmDict['11'][0],parmDict['11'][0],parmDict['11'][0],90.,90.,90.] |
---|
3548 | elif SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm','4/m','4/mmm']: |
---|
3549 | cell = [parmDict['11'][0],parmDict['11'][0],parmDict['12'][0],90.,90.,90.] |
---|
3550 | elif SGData['SGLaue'] in ['3R','3mR']: |
---|
3551 | cell = [parmDict['11'][0],parmDict['11'][0],parmDict['11'][0], |
---|
3552 | parmDict['12'][0],parmDict['12'][0],parmDict['12'][0]] |
---|
3553 | elif SGData['SGLaue'] == 'mmm': |
---|
3554 | cell = [parmDict['11'][0],parmDict['12'][0],parmDict['13'][0],90.,90.,90.] |
---|
3555 | elif SGData['SGLaue'] == '2/m': |
---|
3556 | if SGData['SGUniq'] == 'a': |
---|
3557 | cell = [parmDict['11'][0],parmDict['12'][0],parmDict['13'][0],parmDict['14'][0],90.,90.] |
---|
3558 | elif SGData['SGUniq'] == 'b': |
---|
3559 | cell = [parmDict['11'][0],parmDict['12'][0],parmDict['13'][0],90.,parmDict['14'][0],90.] |
---|
3560 | elif SGData['SGUniq'] == 'c': |
---|
3561 | cell = [parmDict['11'][0],parmDict['12'][0],parmDict['13'][0],90.,90.,parmDict['14'][0]] |
---|
3562 | else: |
---|
3563 | cell = [parmDict['11'][0],parmDict['12'][0],parmDict['13'][0], |
---|
3564 | parmDict['14'][0],parmDict['15'][0],parmDict['16'][0]] |
---|
3565 | return G2lat.cell2A(cell) |
---|
3566 | except KeyError: |
---|
3567 | return None |
---|
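# Usage sketch for GetSeqCell: parmDict comes from the PDFfit sequential results and
# holds [value,esd] pairs under keys '11'..'16' for the refinable cell terms; the
# return is G2lat.cell2A(cell), or None if an expected key is missing. Hypothetical
# cubic example:
#
#   A = GetSeqCell(SGData, {'11': [4.05, 0.001]})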
3568 | |
---|
3569 | def UpdatePDFfit(Phase,RMCPdict): |
---|
3570 | ''' Updates various PDFfit parameters held in GSAS-II |
---|
3571 | ''' |
---|
3572 | |
---|
3573 | General = Phase['General'] |
---|
3574 | if RMCPdict['refinement'] == 'normal': |
---|
3575 | fName = General['Name']+'-PDFfit.rstr' |
---|
3576 | try: |
---|
3577 | rstr = open(fName.replace(' ','_'),'r') |
---|
3578 | except FileNotFoundError: |
---|
3579 | return [fName,'Not found - PDFfit failed'] |
---|
3580 | lines = rstr.readlines() |
---|
3581 | rstr.close() |
---|
3582 | header = [line[:-1].split(' ',1) for line in lines[:7]] |
---|
3583 | resdict = dict(header) |
---|
3584 | for item in ['sharp','cell']: |
---|
3585 | resdict[item] = [float(val) for val in resdict[item].split(',')] |
---|
3586 | General['Cell'][1:7] = resdict['cell'] |
---|
3587 | for inam,name in enumerate(['delta2','delta1','sratio']): |
---|
3588 | RMCPdict[name][0] = float(resdict['sharp'][inam]) |
---|
3589 | if 'shape' in resdict: |
---|
3590 | if 'sphere' in resdict['shape']: |
---|
3591 | RMCPdict['spdiameter'][0] = float(resdict['shape'].split()[-1]) |
---|
3592 | else: |
---|
3593 | RMCPdict['stepcut'][0] = float(resdict['shape'][-1]) |
---|
3594 | cx,ct,cs,ci = G2mth.getAtomPtrs(Phase) |
---|
3595 | Atoms = Phase['Atoms'] |
---|
3596 | atmBeg = 0 |
---|
3597 | for line in lines: |
---|
3598 | atmBeg += 1 |
---|
3599 | if 'atoms' in line: |
---|
3600 | break |
---|
3601 | for atom in Atoms: |
---|
3602 | atstr = lines[atmBeg][:-1].split() |
---|
3603 | Uiistr = lines[atmBeg+2][:-1].split() |
---|
3604 | Uijstr = lines[atmBeg+4][:-1].split() |
---|
3605 | atom[cx:cx+4] = [float(atstr[1]),float(atstr[2]),float(atstr[3]),float(atstr[4])] |
---|
3606 | atom[ci] = 'A' |
---|
3607 | atom[ci+2:ci+5] = [float(Uiistr[0]),float(Uiistr[1]),float(Uiistr[2])] |
---|
3608 | atom[ci+5:ci+8] = [float(Uijstr[0]),float(Uijstr[1]),float(Uijstr[2])] |
---|
3609 | atmBeg += 6 |
---|
3610 | fName = General['Name']+'-PDFfit.res' |
---|
3611 | else: |
---|
3612 | fName = 'Sequential_PDFfit.res' |
---|
3613 | try: |
---|
3614 | res = open(fName.replace(' ','_'),'r') |
---|
3615 | except FileNotFoundError: |
---|
3616 | return [fName,'Not found - PDFfit failed'] |
---|
3617 | lines = res.readlines() |
---|
3618 | res.close() |
---|
3619 | Ibeg = False |
---|
3620 | resline = '' |
---|
3621 | XNdata = {'Xdata':RMCPdict['Xdata'],'Ndata':RMCPdict['Ndata']} |
---|
3622 | for line in lines: |
---|
3623 | if 'Radiation' in line and 'X-Rays' in line: |
---|
3624 | dkey = 'Xdata' |
---|
3625 | if 'Radiation' in line and 'Neutrons' in line: |
---|
3626 | dkey = 'Ndata' |
---|
3627 | if 'Qdamp' in line and '(' in line: |
---|
3628 | XNdata[dkey]['qdamp'][0] = float(line.split()[4]) |
---|
3629 | if 'Qbroad' in line and '(' in line: |
---|
3630 | XNdata[dkey]['qbroad'][0] = float(line.split()[4]) |
---|
3631 | if 'Scale' in line and '(' in line: |
---|
3632 | XNdata[dkey]['dscale'][0] = float(line.split()[3]) |
---|
3633 | |
---|
3634 | for iline,line in enumerate(lines): |
---|
3635 | if 'Refinement parameters' in line: |
---|
3636 | Ibeg = True |
---|
3637 | continue |
---|
3638 | if Ibeg: |
---|
3639 | if '---------' in line: |
---|
3640 | break |
---|
3641 | resline += line[:-1] |
---|
3642 | for iline,line in enumerate(lines): |
---|
3643 | if 'Rw - ' in line: |
---|
3644 | if 'nan' in line: |
---|
3645 | Rwp = 100.0 |
---|
3646 | else: |
---|
3647 | Rwp = float(line.split(':')[1]) |
---|
3648 | results = resline.replace('(','').split(')')[:-1] |
---|
3649 | results = ['@'+result.lstrip() for result in results] |
---|
3650 | results = [item.split() for item in results] |
---|
3651 | RMCPdict['Parms'] = dict([[item[0][1:-1],float(item[1])] for item in results]) #{'n':val,...} |
---|
3652 | if RMCPdict['refinement'] == 'normal': |
---|
3653 | fName = General['Name']+'-PDFfit.py' |
---|
3654 | py = open(fName.replace(' ','_'),'r') |
---|
3655 | pylines = py.readlines() |
---|
3656 | py.close() |
---|
3657 | py = open(fName.replace(' ','_'),'w') |
---|
3658 | newpy = [] |
---|
3659 | for pyline in pylines: |
---|
3660 | if 'setpar' in pyline: |
---|
3661 | parm = pyline.split('(')[1].split(',')[0] |
---|
3662 | newpy.append('pf.setpar(%s,%.5f)\n'%(parm,RMCPdict['Parms'][parm])) |
---|
3663 | else: |
---|
3664 | newpy.append(pyline) |
---|
3665 | py.writelines(newpy) |
---|
3666 | py.close() |
---|
3667 | RMCPdict.update(XNdata) |
---|
3668 | results = dict([[item[0][:-1],float(item[1])] for item in results if item[0][:-1] in RMCPdict['AtomVar']]) |
---|
3669 | RMCPdict['AtomVar'].update(results) |
---|
3670 | return None |
---|
3671 | else: #sequential |
---|
3672 | newParms = dict([[item[0][1:-1],[float(item[1]),float(item[2])]] for item in results]) #{'n':[val,esd],...} |
---|
3673 | return newParms,Rwp |
---|
3674 | |
---|
3675 | def MakefullrmcSupercell(Phase,RMCPdict): |
---|
3676 | '''Create a fullrmc supercell from GSAS-II |
---|
3677 | |
---|
3678 | :param dict Phase: phase information from data tree |
---|
3679 | :param dict RMCPdict: fullrmc parameters from GUI |
---|
3680 | RMCPdict['Groups'] is a list of lists where the inner list |
---|
3681 | contains the atom numbers contained in each group. e.g. |
---|
3682 | [[0,1,2,3,4],[5,6],[4,6]] creates three groups with |
---|
3683 | atoms 0-4 in the first |
---|
3684 | atoms 5 & 6 in the second and |
---|
3685 | atoms 4 & 6 in the third. Note that it is fine that |
---|
3686 | atom 4 appears in two groups. |
---|
3687 | ''' |
---|
3688 | #for i in (0,1): grpDict[i].append(1) # debug: 1st & 2nd atoms in 2nd group |
---|
3689 | cell = Phase['General']['Cell'][1:7] |
---|
3690 | A,B = G2lat.cell2AB(cell) |
---|
3691 | cx,ct,cs,cia = Phase['General']['AtomPtrs'] |
---|
3692 | SGData = Phase['General']['SGData'] |
---|
3693 | atomlist = [] |
---|
3694 | for i,atom in enumerate(Phase['Atoms']): |
---|
3695 | el = ''.join([i for i in atom[ct] if i.isalpha()]) |
---|
3696 | grps = [j for j,g in enumerate(RMCPdict.get('Groups',[])) if i in g] |
---|
3697 | atomlist.append((el, atom[ct-1], grps)) |
---|
3698 | # create a list of coordinates with symmetry & unit cell translation duplicates |
---|
3699 | coordlist = [] |
---|
3700 | cellnum = -1 |
---|
3701 | for a in range(int(0.5-RMCPdict['SuperCell'][0]/2),int(1+RMCPdict['SuperCell'][0]/2)): |
---|
3702 | for b in range(int(0.5-RMCPdict['SuperCell'][1]/2),int(1+RMCPdict['SuperCell'][1]/2)): |
---|
3703 | for c in range(int(0.5-RMCPdict['SuperCell'][2]/2),int(1+RMCPdict['SuperCell'][2]/2)): |
---|
3704 | cellnum += 1 |
---|
3705 | for i,atom in enumerate(Phase['Atoms']): |
---|
3706 | for item in G2spc.GenAtom(atom[cx:cx+3],SGData,Move=False): |
---|
3707 | # if i == 0: print(item[0]+[a,b,c]) |
---|
3708 | xyzOrth = np.inner(A,item[0]+[a,b,c]) |
---|
3709 | #coordlist.append((i,list(xyzOrth),cellnum,list(item[0]+[a,b,c]))) |
---|
3710 | coordlist.append((item[1],cellnum,i,list(xyzOrth))) |
---|
3711 | return atomlist,coordlist |
---|
3712 | |
---|
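# Hedged illustration (not part of the original module): the 'Groups' entry that
# MakefullrmcSupercell reads from RMCPdict is a list of lists of atom indices, and
# each atom's group membership is looked up exactly as in the loop above. The toy
# values below are invented for demonstration only.
def _example_group_membership():
    Groups = [[0,1,2,3,4],[5,6],[4,6]]      # three groups; atoms 4 & 6 appear twice
    membership = []
    for i in range(7):                      # pretend the phase has 7 atoms
        grps = [j for j,g in enumerate(Groups) if i in g]
        membership.append((i,grps))
    return membership                       # e.g. atom 4 -> groups [0,2]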
3713 | def MakefullrmcRun(pName,Phase,RMCPdict): |
---|
3714 | '''Creates a script to run fullrmc. Returns the name of the file that was |
---|
3715 | created. |
---|
3716 | ''' |
---|
3717 | BondList = {} |
---|
3718 | for k in RMCPdict['Pairs']: |
---|
3719 | if RMCPdict['Pairs'][k][1]+RMCPdict['Pairs'][k][2]>0: |
---|
3720 | BondList[k] = (RMCPdict['Pairs'][k][1],RMCPdict['Pairs'][k][2]) |
---|
3721 | AngleList = [] |
---|
3722 | for angle in RMCPdict['Angles']: |
---|
3723 | if angle[3] == angle[4] or angle[5] >= angle[6] or angle[6] <= 0: |
---|
3724 | continue |
---|
3725 | for i in (0,1,2): |
---|
3726 | angle[i] = angle[i].strip() |
---|
3727 | AngleList.append(angle) |
---|
3728 | # rmin = RMCPdict['min Contact'] |
---|
3729 | cell = Phase['General']['Cell'][1:7] |
---|
3730 | SymOpList = G2spc.AllOps(Phase['General']['SGData'])[0] |
---|
3731 | cx,ct,cs,cia = Phase['General']['AtomPtrs'] |
---|
3732 | atomsList = [] |
---|
3733 | for atom in Phase['Atoms']: |
---|
3734 | el = ''.join([i for i in atom[ct] if i.isalpha()]) |
---|
3735 | atomsList.append([el] + atom[cx:cx+4]) |
---|
3736 | projDir,projName = os.path.split(os.path.abspath(pName)) |
---|
3737 | scrname = pName+'-fullrmc.py' |
---|
3738 | restart = '%s_restart.pdb'%pName |
---|
3739 | Files = RMCPdict['files'] |
---|
3740 | rundata = '' |
---|
3741 | rundata += '## fullrmc %s file ##\n## OK to edit this by hand ##\n'%scrname |
---|
3742 | rundata += '# created in '+__file__+" v"+filversion.split()[1] |
---|
3743 | rundata += dt.datetime.strftime(dt.datetime.now()," at %Y-%m-%dT%H:%M\n") |
---|
3744 | rundata += ''' |
---|
3745 | # fullrmc imports (all that are potentially useful) |
---|
3746 | import os,glob |
---|
3747 | import time |
---|
3748 | import pickle |
---|
3749 | import types |
---|
3750 | import copy |
---|
3751 | import numpy as np |
---|
3752 | import matplotlib as mpl |
---|
3753 | import fullrmc |
---|
3754 | from pdbparser import pdbparser |
---|
3755 | from pdbparser.Utilities.Database import __ATOM__ |
---|
3756 | from fullrmc.Core import Collection |
---|
3757 | from fullrmc.Engine import Engine |
---|
3758 | import fullrmc.Constraints.PairDistributionConstraints as fPDF |
---|
3759 | from fullrmc.Constraints.StructureFactorConstraints import ReducedStructureFactorConstraint, StructureFactorConstraint |
---|
3760 | from fullrmc.Constraints.RadialDistributionConstraints import RadialDistributionConstraint |
---|
3761 | from fullrmc.Constraints.StructureFactorConstraints import NormalizedStructureFactorConstraint |
---|
3762 | from fullrmc.Constraints.DistanceConstraints import DistanceConstraint |
---|
3763 | from fullrmc.Constraints.BondConstraints import BondConstraint |
---|
3764 | from fullrmc.Constraints.AngleConstraints import BondsAngleConstraint |
---|
3765 | from fullrmc.Constraints.DihedralAngleConstraints import DihedralAngleConstraint |
---|
3766 | from fullrmc.Generators.Swaps import SwapPositionsGenerator |
---|
3767 | from fullrmc.Core.MoveGenerator import MoveGeneratorCollector |
---|
3768 | from fullrmc.Generators.Translations import TranslationGenerator |
---|
3769 | from fullrmc.Generators.Rotations import RotationGenerator |
---|
3770 | |
---|
3771 | # utility routines |
---|
3772 | def writeHeader(ENGINE,statFP): |
---|
3773 | """header for stats file""" |
---|
3774 | statFP.write('generated-steps, total-error, ') |
---|
3775 | for c in ENGINE.constraints: |
---|
3776 | statFP.write(c.constraintName) |
---|
3777 | statFP.write(', ') |
---|
3778 | statFP.write('\\n') |
---|
3779 | statFP.flush() |
---|
3780 | |
---|
3781 | def writeCurrentStatus(ENGINE,statFP,plotF): |
---|
3782 | """line in stats file & current constraint plots""" |
---|
3783 | statFP.write(str(ENGINE.generated)) |
---|
3784 | statFP.write(', ') |
---|
3785 | statFP.write(str(ENGINE.totalStandardError)) |
---|
3786 | statFP.write(', ') |
---|
3787 | for c in ENGINE.constraints: |
---|
3788 | statFP.write(str(c.standardError)) |
---|
3789 | statFP.write(', ') |
---|
3790 | statFP.write('\\n') |
---|
3791 | statFP.flush() |
---|
3792 | mpl.use('agg') |
---|
3793 | fp = open(plotF,'wb') |
---|
3794 | for c in ENGINE.constraints: |
---|
3795 | p = c.plot(show=False) |
---|
3796 | p[0].canvas.draw() |
---|
3797 | image = p[0].canvas.buffer_rgba() |
---|
3798 | pickle.dump(c.constraintName,fp) |
---|
3799 | pickle.dump(np.array(image),fp) |
---|
3800 | fp.close() |
---|
3801 | |
---|
3802 | def calcRmax(ENGINE): |
---|
3803 | """from Bachir, works for non-orthorhombic cells""" |
---|
3804 | a,b,c = ENGINE.basisVectors |
---|
3805 | lens = [] |
---|
3806 | ts = np.linalg.norm(np.cross(a,b))/2 |
---|
3807 | lens.extend( [ts/np.linalg.norm(a), ts/np.linalg.norm(b)] ) |
---|
3808 | ts = np.linalg.norm(np.cross(b,c))/2 |
---|
3809 | lens.extend( [ts/np.linalg.norm(b), ts/np.linalg.norm(c)] ) |
---|
3810 | ts = np.linalg.norm(np.cross(a,c))/2 |
---|
3811 | lens.extend( [ts/np.linalg.norm(a), ts/np.linalg.norm(c)] ) |
---|
3812 | return min(lens) |
---|
3813 | ''' |
---|
3814 | if RMCPdict.get('Groups',[]): rundata += ''' |
---|
3815 | def makepdb(atoms, coords, bbox=None): |
---|
3816 | """creates a supercell directly from atom info""" |
---|
3817 | # used when ENGINE.build_crystal_set_pdb is not called |
---|
3818 | prevcell = None |
---|
3819 | rec = copy.copy(__ATOM__) |
---|
3820 | rec['residue_name'] = 'MOL' |
---|
3821 | records = [] |
---|
3822 | seqNum = 0 |
---|
3823 | segId = '0' |
---|
3824 | groups = {} |
---|
3825 | for symcell in set([(sym,cell) for sym,cell,atm,xyz in coords]): |
---|
3826 | seqNum += 1 |
---|
3827 | if seqNum == 9999: |
---|
3828 | seqNum = 1 |
---|
3829 | segId = str(int(segId) + 1) |
---|
3830 | for i,(sym,cell,atm,(x,y,z)) in enumerate(coords): |
---|
3831 | if (sym,cell) != symcell: continue |
---|
3832 | rec = copy.copy(rec) |
---|
3833 | for grp in atoms[atm][2]: |
---|
3834 | if (sym,cell) not in groups: |
---|
3835 | groups[(sym,cell)] = {} |
---|
3836 | if grp not in groups[(sym,cell)]: |
---|
3837 | groups[(sym,cell)][grp] = [len(records)] |
---|
3838 | else: |
---|
3839 | groups[(sym,cell)][grp].append(len(records)) |
---|
3840 | rec['coordinates_x'] = x |
---|
3841 | rec['coordinates_y'] = y |
---|
3842 | rec['coordinates_z'] = z |
---|
3843 | rec['element_symbol'] = atoms[atm][0] |
---|
3844 | rec['atom_name'] = atoms[atm][1] |
---|
3845 | rec['sequence_number'] = seqNum |
---|
3846 | rec['segment_identifier'] = segId |
---|
3847 | records.append(rec) |
---|
3848 | # create pdb |
---|
3849 | pdb = pdbparser() |
---|
3850 | pdb.records = records |
---|
3851 | if groups: |
---|
3852 | return pdb,[groups[j][i] for j in groups for i in groups[j]] |
---|
3853 | else: |
---|
3854 | return pdb,[] |
---|
3855 | ''' |
---|
3856 | rundata += ''' |
---|
3857 | ### When True, erases an existing engine to provide a fresh start |
---|
3858 | FRESH_START = {:} |
---|
3859 | dirName = "{:}" |
---|
3860 | prefix = "{:}" |
---|
3861 | project = prefix + "-fullrmc" |
---|
3862 | time0 = time.time() |
---|
3863 | '''.format(RMCPdict['ReStart'][0],projDir,projName) |
---|
3864 | |
---|
3865 | rundata += '# setup structure\n' |
---|
3866 | rundata += 'cell = ' + str(cell) + '\n' |
---|
3867 | rundata += 'supercell = ' + str(RMCPdict['SuperCell']) + '\n' |
---|
3868 | rundata += '\n# define structure info\n' |
---|
3869 | if RMCPdict.get('Groups',[]): |
---|
3870 | # compute bounding box coordinates |
---|
3871 | bbox = [] |
---|
3872 | A,B = G2lat.cell2AB(cell) |
---|
3873 | for i in range(3): |
---|
3874 | for val in int(0.5-RMCPdict['SuperCell'][i]/2),int(1+RMCPdict['SuperCell'][i]/2): #bounds along axis i |
---|
3875 | fpos = [0,0,0] |
---|
3876 | fpos[i] = val |
---|
3877 | bbox.append(np.inner(A,fpos)) |
---|
3878 | rundata += 'bboxlist = [ # orthogonal coordinate for supercell corners\n' |
---|
3879 | for i in bbox: |
---|
3880 | rundata += ' '+str(list(i))+',\n' |
---|
3881 | rundata += ' ] # bboxlist\n\n' |
---|
3882 | atomlist,coordlist = MakefullrmcSupercell(Phase,RMCPdict) |
---|
3883 | rundata += 'atomlist = [ # [element, label, grouplist]\n' |
---|
3884 | for i in atomlist: |
---|
3885 | rundata += ' '+str(i)+',\n' |
---|
3886 | rundata += ' ] # atomlist\n\n' |
---|
3887 | rundata += 'coordlist = [ # (sym#, cell#, atom#, [ortho coords],)\n' |
---|
3888 | for i in coordlist: |
---|
3889 | rundata += ' '+str(i)+',\n' |
---|
3890 | rundata += ' ] # coordlist\n' |
---|
3891 | else: |
---|
3892 | rundata += "SymOpList = "+str([i.lower() for i in SymOpList]) + '\n' |
---|
3893 | rundata += 'atomList = ' + str(atomsList).replace('],','],\n ') + '\n' |
---|
3894 | |
---|
3895 | rundata += '\n# initialize engine\n' |
---|
3896 | rundata += ''' |
---|
3897 | engineFileName = os.path.join(dirName, project + '.rmc') |
---|
3898 | projectStats = os.path.join(dirName, project + '.stats') |
---|
3899 | projectPlots = os.path.join(dirName, project + '.plots') |
---|
3900 | projectXYZ = os.path.join(dirName, project + '.atoms') |
---|
3901 | pdbFile = os.path.join(dirName, project + '_restart.pdb') |
---|
3902 | # if an engine file already exists (and FRESH_START is not set) it is loaded below; otherwise build a new one |
---|
3903 | ENGINE = Engine(path=None) |
---|
3904 | if not ENGINE.is_engine(engineFileName) or FRESH_START: |
---|
3905 | ENGINE = Engine(path=engineFileName, freshStart=True) |
---|
3906 | ''' |
---|
3907 | if RMCPdict.get('Groups',[]): |
---|
3908 | rundata += ''' |
---|
3909 | # create structure from GSAS-II constructed supercell |
---|
3910 | bbox = (np.array(bboxlist[1::2])-np.array(bboxlist[0::2])).flatten() |
---|
3911 | pdb,grouplist = makepdb(atomlist,coordlist,bbox) |
---|
3912 | ENGINE.set_pdb(pdb) |
---|
3913 | ENGINE.set_boundary_conditions(bbox) |
---|
3914 | if grouplist: ENGINE.set_groups(grouplist) |
---|
3915 | ''' |
---|
3916 | if RMCPdict.get('GroupMode',0) == 0: # 'Rotate & Translate' |
---|
3917 | rundata += ''' |
---|
3918 | for g in ENGINE.groups: |
---|
3919 | TMG = TranslationGenerator(amplitude=0.2) # create translation generator |
---|
3920 | if len(g) > 1: # create rotation generator for groups with more than 1 atom |
---|
3921 | RMG = RotationGenerator(amplitude=2) |
---|
3922 | MG = MoveGeneratorCollector(collection=[TMG,RMG],randomize=True) |
---|
3923 | else: |
---|
3924 | MG = MoveGeneratorCollector(collection=[TMG],randomize=True) |
---|
3925 | g.set_move_generator( MG ) |
---|
3926 | ''' |
---|
3927 | elif RMCPdict.get('GroupMode',0) == 1: # 'Rotate only' |
---|
3928 | rundata += ''' |
---|
3929 | for g in ENGINE.groups: |
---|
3930 | if len(g) > 1: # create rotation generator for groups with more than 1 atom |
---|
3931 | RMG = RotationGenerator(amplitude=2) |
---|
3932 | g.set_move_generator( RMG ) |
---|
3933 | ''' |
---|
3934 | else: # 'Translate only' |
---|
3935 | rundata += ' # translate only set by default\n' |
---|
3936 | else: |
---|
3937 | rundata += ''' |
---|
3938 | # create structure, let fullrmc construct supercell |
---|
3939 | ENGINE.build_crystal_set_pdb(symOps = SymOpList, |
---|
3940 | atoms = atomList, |
---|
3941 | unitcellBC = cell, |
---|
3942 | supercell = supercell) |
---|
3943 | ENGINE.set_groups_as_atoms() |
---|
3944 | ''' |
---|
3945 | rundata += ' rho0 = len(ENGINE.allNames)/ENGINE.volume\n' |
---|
3946 | rundata += '\n # "Constraints" (includes experimental data) setup\n' |
---|
3947 | # settings that require a new Engine |
---|
3948 | for File in Files: |
---|
3949 | filDat = RMCPdict['files'][File] |
---|
3950 | if not os.path.exists(filDat[0]): continue |
---|
3951 | sfwt = 'neutronCohb' |
---|
3952 | if 'Xray' in File: |
---|
3953 | sfwt = 'atomicNumber' |
---|
3954 | if 'G(r)' in File: |
---|
3955 | rundata += ' GR = np.loadtxt(os.path.join(dirName,"%s")).T\n'%filDat[0] |
---|
3956 | if filDat[3] == 0: |
---|
3957 | #rundata += ''' # read and xform G(r) as defined in RMCProfile |
---|
3958 | # see eq. 44 in Keen, J. Appl. Cryst. (2001) 34 172-177\n''' |
---|
3959 | #rundata += ' GR[1] *= 4 * np.pi * GR[0] * rho0 / sumCiBi2\n' |
---|
3960 | #rundata += ' GofR = fPDF.PairDistributionConstraint(experimentalData=GR.T, weighting="%s")\n'%sfwt |
---|
3961 | rundata += ' # G(r) as defined in RMCProfile\n' |
---|
3962 | rundata += ' GofR = RadialDistributionConstraint(experimentalData=GR.T, weighting="%s")\n'%sfwt |
---|
3963 | elif filDat[3] == 1: |
---|
3964 | rundata += ' # This is G(r) as defined in PDFFIT\n' |
---|
3965 | rundata += ' GofR = fPDF.PairDistributionConstraint(experimentalData=GR.T, weighting="%s")\n'%sfwt |
---|
3966 | elif filDat[3] == 2: |
---|
3967 | rundata += ' # This is g(r)\n' |
---|
3968 | rundata += ' GofR = fPDF.PairCorrelationConstraint(experimentalData=GR.T, weighting="%s")\n'%sfwt |
---|
3969 | else: |
---|
3970 | raise ValueError('Invalid G(r) type: '+str(filDat[3])) |
---|
3971 | rundata += ' ENGINE.add_constraints([GofR])\n' |
---|
3972 | rundata += ' GofR.set_limits((None, calcRmax(ENGINE)))\n' |
---|
3973 | if RMCPdict['addThermalBroadening']: |
---|
3974 | rundata += " GofR.set_thermal_corrections({'defaultFactor': 0.001})\n" |
---|
3975 | rundata += " GofR.thermalCorrections['factors'] = {\n" |
---|
3977 | for atm1 in RMCPdict['aTypes']: |
---|
3978 | for atm2 in RMCPdict['aTypes']: |
---|
3979 | rundata += " ('{}', '{}'): {},\n".format( |
---|
3980 | atm1,atm2, |
---|
3981 | (RMCPdict['ThermalU'].get(atm1,0.005)+ RMCPdict['ThermalU'].get(atm2,0.005))/2) |
---|
3982 | rundata += ' }\n' |
---|
3983 | elif '(Q)' in File: |
---|
3984 | rundata += ' SOQ = np.loadtxt(os.path.join(dirName,"%s")).T\n'%filDat[0] |
---|
3985 | if filDat[3] == 0: |
---|
3986 | rundata += ' # F(Q) as defined in RMCProfile\n' |
---|
3987 | #rundata += ' SOQ[1] *= 1 / sumCiBi2\n' |
---|
3988 | if filDat[4]: |
---|
3989 | rundata += ' SOQ[1] = Collection.sinc_convolution(q=SOQ[0],sq=SOQ[1],rmax=calcRmax(ENGINE))\n' |
---|
3990 | rundata += ' SofQ = NormalizedStructureFactorConstraint(experimentalData=SOQ.T, weighting="%s")\n'%sfwt |
---|
3991 | elif filDat[3] == 1: |
---|
3992 | rundata += ' # S(Q) as defined in PDFFIT\n' |
---|
3993 | rundata += ' SOQ[1] -= 1\n' |
---|
3994 | if filDat[4]: |
---|
3995 | rundata += ' SOQ[1] = Collection.sinc_convolution(q=SOQ[0],sq=SOQ[1],rmax=calcRmax(ENGINE))\n' |
---|
3996 | rundata += ' SofQ = ReducedStructureFactorConstraint(experimentalData=SOQ.T, weighting="%s")\n'%sfwt |
---|
3997 | else: |
---|
3998 | raise ValueError('Invalid S(Q) type: '+str(filDat[3])) |
---|
3999 | rundata += ' ENGINE.add_constraints([SofQ])\n' |
---|
4000 | else: |
---|
4001 | print('Warning: file type for '+File+' not recognized as G(r) or S(Q); ignored') |
---|
4002 | minDists = '' |
---|
4003 | if BondList and RMCPdict.get('useBondConstraints',True): |
---|
4004 | rundata += ''' B_CONSTRAINT = BondConstraint() |
---|
4005 | ENGINE.add_constraints(B_CONSTRAINT) |
---|
4006 | B_CONSTRAINT.create_supercell_bonds(bondsDefinition=[ |
---|
4007 | ''' |
---|
4008 | for pair in BondList: |
---|
4009 | e1,e2 = pair.split('-') |
---|
4010 | d1,d2 = BondList[pair] |
---|
4011 | if d1 == 0: continue |
---|
4012 | if d2 == 0: |
---|
4013 | minDists += '("element","{}","{}",{}),'.format(e1.strip(),e2.strip(),d1) |
---|
4014 | else: |
---|
4015 | rundata += ' ("element","{}","{}",{},{}),\n'.format( |
---|
4016 | e1.strip(),e2.strip(),d1,d2) |
---|
4017 | rundata += ' ])\n' |
---|
4018 | rundata += ' D_CONSTRAINT = DistanceConstraint(defaultLowerDistance={})\n'.format(RMCPdict['min Contact']) |
---|
4019 | if minDists: |
---|
4020 | rundata += " D_CONSTRAINT.set_pairs_definition( {'inter':[" + minDists + "]})\n" |
---|
4021 | rundata += ' ENGINE.add_constraints(D_CONSTRAINT)\n' |
---|
4022 | |
---|
4023 | if AngleList: |
---|
4024 | rundata += ''' A_CONSTRAINT = BondsAngleConstraint() |
---|
4025 | ENGINE.add_constraints(A_CONSTRAINT) |
---|
4026 | A_CONSTRAINT.create_supercell_angles(anglesDefinition=[ |
---|
4027 | ''' |
---|
4028 | for item in AngleList: |
---|
4029 | rundata += (' '+ |
---|
4030 | '("element","{1}","{0}","{2}",{5},{6},{5},{6},{3},{4}),\n'.format(*item)) |
---|
4031 | rundata += ' ])\n' |
---|
4032 | rundata += ''' |
---|
4033 | for f in glob.glob(os.path.join(dirName,prefix+"_*.log")): os.remove(f) |
---|
4034 | ENGINE.save() |
---|
4035 | else: |
---|
4036 | ENGINE = ENGINE.load(path=engineFileName) |
---|
4037 | |
---|
4038 | ENGINE.set_log_file(os.path.join(dirName,prefix)) |
---|
4039 | ''' |
---|
4040 | if RMCPdict['Swaps']: |
---|
4041 | rundata += '\n#set up for site swaps\n' |
---|
4042 | rundata += 'aN = ENGINE.allNames\n' |
---|
4043 | rundata += 'SwapGen = {}\n' |
---|
4044 | for swap in RMCPdict['Swaps']: |
---|
4045 | rundata += 'SwapA = [[idx] for idx in range(len(aN)) if aN[idx]=="%s"]\n'%swap[0] |
---|
4046 | rundata += 'SwapB = [[idx] for idx in range(len(aN)) if aN[idx]=="%s"]\n'%swap[1] |
---|
4047 | rundata += 'SwapGen["%s-%s"] = [SwapPositionsGenerator(swapList=SwapA),SwapPositionsGenerator(swapList=SwapB),%.2f]\n'%(swap[0],swap[1],swap[2]) |
---|
4048 | rundata += ' for swaps in SwapGen:\n' |
---|
4049 | rundata += ' AB = swaps.split("-")\n' |
---|
4050 | rundata += ' ENGINE.set_groups_as_atoms()\n' |
---|
4051 | rundata += ' for g in ENGINE.groups:\n' |
---|
4052 | rundata += ' if aN[g.indexes[0]]==AB[0]:\n' |
---|
4053 | rundata += ' g.set_move_generator(SwapGen[swaps][0])\n' |
---|
4054 | rundata += ' elif aN[g.indexes[0]]==AB[1]:\n' |
---|
4055 | rundata += ' g.set_move_generator(SwapGen[swaps][1])\n' |
---|
4056 | rundata += ' sProb = SwapGen[swaps][2]\n' |
---|
4057 | rundata += '''for c in ENGINE.constraints: |
---|
4058 | if hasattr(c, '_ExperimentalConstraint__adjustScaleFactor'): |
---|
4059 | def _constraint_copy_needs_lut(self, *args, **kwargs): |
---|
4060 | result = super(self.__class__, self)._constraint_copy_needs_lut(*args, **kwargs) |
---|
4061 | result['_ExperimentalConstraint__adjustScaleFactor'] = '_ExperimentalConstraint__adjustScaleFactor' |
---|
4062 | return result |
---|
4063 | c._constraint_copy_needs_lut = types.MethodType(_constraint_copy_needs_lut, c) |
---|
4064 | ''' |
---|
4065 | # rundata += '\n# set weights -- do this now so values can be changed without a restart\n' |
---|
4066 | # rundata += 'wtDict = {}\n' |
---|
4067 | # for File in Files: |
---|
4068 | # filDat = RMCPdict['files'][File] |
---|
4069 | # if not os.path.exists(filDat[0]): continue |
---|
4070 | # if 'Xray' in File: |
---|
4071 | # sfwt = 'atomicNumber' |
---|
4072 | # else: |
---|
4073 | # sfwt = 'neutronCohb' |
---|
4074 | # if 'G(r)' in File: |
---|
4075 | # typ = 'Pair' |
---|
4076 | # elif '(Q)' in File: |
---|
4077 | # typ = 'Struct' |
---|
4078 | # rundata += 'wtDict["{}-{}"] = {}\n'.format(typ,sfwt,filDat[1]) |
---|
4079 | rundata += '\n# set PDF fitting range\n' |
---|
4080 | rundata += 'for c in ENGINE.constraints: # loop over predefined constraints\n' |
---|
4081 | rundata += ' if type(c) is fPDF.PairDistributionConstraint:\n' |
---|
4082 | # rundata += ' c.set_variance_squared(1./wtDict["Pair-"+c.weighting])\n' |
---|
4083 | rundata += ' c.set_limits((None,calcRmax(ENGINE)))\n' |
---|
4084 | if RMCPdict['FitScale']: |
---|
4085 | rundata += ' c.set_adjust_scale_factor((10, 0.01, 100.))\n' |
---|
4086 | # rundata += ' c.set_variance_squared(1./wtDict["Struct-"+c.weighting])\n' |
---|
4087 | if RMCPdict['FitScale']: |
---|
4088 | rundata += ' elif type(c) is ReducedStructureFactorConstraint:\n' |
---|
4089 | rundata += ' c.set_adjust_scale_factor((10, 0.01, 100.))\n' |
---|
4090 | # torsions difficult to implement, must be internal to cell & named with |
---|
4091 | # fullrmc atom names |
---|
4092 | # if len(RMCPdict['Torsions']): # Torsions currently commented out in GUI |
---|
4093 | # rundata += 'for c in ENGINE.constraints: # look for Dihedral Angle Constraints\n' |
---|
4094 | # rundata += ' if type(c) is DihedralAngleConstraint:\n' |
---|
4095 | # rundata += ' c.set_variance_squared(%f)\n'%RMCPdict['Torsion Weight'] |
---|
4096 | # rundata += ' c.create_angles_by_definition(anglesDefinition={"%s":[\n'%Res |
---|
4097 | # for torsion in RMCPdict['Torsions']: |
---|
4098 | # rundata += ' %s\n'%str(tuple(torsion)) |
---|
4099 | # rundata += ' ]})\n' |
---|
4100 | rundata += ''' |
---|
4101 | if FRESH_START: |
---|
4102 | # initialize engine with one step to get starting config energetics |
---|
4103 | ENGINE.run(restartPdb=pdbFile,numberOfSteps=1, saveFrequency=1) |
---|
4104 | statFP = open(projectStats,'w') |
---|
4105 | writeHeader(ENGINE,statFP) |
---|
4106 | writeCurrentStatus(ENGINE,statFP,projectPlots) |
---|
4107 | else: |
---|
4108 | statFP = open(projectStats,'a') |
---|
4109 | |
---|
4110 | # setup runs for fullrmc |
---|
4111 | ''' |
---|
4112 | rundata += 'steps = {}\n'.format(RMCPdict['Steps/cycle']) |
---|
4113 | rundata += 'for _ in range({}):\n'.format(RMCPdict['Cycles']) |
---|
4114 | rundata += ' expected = ENGINE.generated+steps\n' |
---|
4115 | |
---|
4116 | rundata += ' ENGINE.run(restartPdb=pdbFile,numberOfSteps=steps, saveFrequency=steps)\n' |
---|
4117 | rundata += ' writeCurrentStatus(ENGINE,statFP,projectPlots)\n' |
---|
4118 | rundata += ' if ENGINE.generated != expected: break # run was stopped' |
---|
4119 | rundata += ''' |
---|
4120 | statFP.close() |
---|
4121 | fp = open(projectXYZ,'w') # save final atom positions |
---|
4122 | fp.write('cell: {} {} {} {} {} {}\\n') |
---|
4123 | fp.write('supercell: {} {} {}\\n') |
---|
4124 | '''.format(*cell,*RMCPdict['SuperCell']) |
---|
4125 | rundata += '''# loop over atoms |
---|
4126 | for n,e,(x,y,z) in zip(ENGINE.allNames, |
---|
4127 | ENGINE.allElements,ENGINE.realCoordinates): |
---|
4128 | fp.write('{} {} {:.5f} {:.5f} {:.5f}\\n'.format(n,e,x,y,z)) |
---|
4129 | fp.close() |
---|
4130 | print("ENGINE run time %.2f s"%(time.time()-time0)) |
---|
4131 | ''' |
---|
4132 | rfile = open(scrname,'w') |
---|
4133 | rfile.writelines(rundata) |
---|
4134 | rfile.close() |
---|
4135 | return scrname |
---|
4136 | |
---|
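# Hedged usage sketch (not part of the original module): MakefullrmcRun only writes
# the driver script; running it is left to the caller. Something along these lines
# could execute it with a Python that has fullrmc installed ("python3" here is an
# assumption, not a GSAS-II setting):
def _example_run_fullrmc_script(pName, Phase, RMCPdict):
    import subprocess
    scrname = MakefullrmcRun(pName, Phase, RMCPdict)   # writes <pName>-fullrmc.py
    return subprocess.call(['python3', scrname])       # returns 0 on success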
4137 | def GetRMCBonds(general,RMCPdict,Atoms,bondList): |
---|
4138 | bondDist = [] |
---|
4139 | Cell = general['Cell'][1:7] |
---|
4140 | Supercell = RMCPdict['SuperCell'] |
---|
4141 | Trans = np.eye(3)*np.array(Supercell) |
---|
4142 | Cell = G2lat.TransformCell(Cell,Trans)[:6] |
---|
4143 | Amat,Bmat = G2lat.cell2AB(Cell) |
---|
4144 | indices = (-1,0,1) |
---|
4145 | Units = np.array([[h,k,l] for h in indices for k in indices for l in indices]) |
---|
4146 | for bonds in bondList: |
---|
4147 | Oxyz = np.array(Atoms[bonds[0]][1:]) |
---|
4148 | Txyz = np.array([Atoms[tgt-1][1:] for tgt in bonds[1]]) |
---|
4149 | Dx = np.array([Txyz-Oxyz+unit for unit in Units]) |
---|
4150 | Dx = np.sqrt(np.sum(np.inner(Dx,Amat)**2,axis=2)) |
---|
4151 | for dx in Dx.T: |
---|
4152 | bondDist.append(np.min(dx)) |
---|
4153 | return np.array(bondDist) |
---|
4154 | |
---|
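# Hedged illustration (added): GetRMCBonds takes each bond length as the minimum
# over the 27 neighboring-cell translations (the "Units" offsets above), i.e. a
# brute-force minimum-image convention. A stripped-down version for one atom pair
# in an orthogonal box, with made-up numbers:
def _example_min_image_distance():
    import numpy as np
    Amat = np.diag([10.,10.,10.])                       # orthogonal 10 A cell
    Oxyz = np.array([0.05,0.05,0.05])                   # fractional coordinates
    Txyz = np.array([0.95,0.05,0.05])
    indices = (-1,0,1)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    Dx = np.array([Txyz-Oxyz+unit for unit in Units])   # all periodic images
    d = np.sqrt(np.sum(np.inner(Dx,Amat)**2,axis=1))    # Cartesian lengths
    return d.min()                                      # ~1.0 A, not 9.0 A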
4155 | def GetRMCAngles(general,RMCPdict,Atoms,angleList): |
---|
4156 | bondAngles = [] |
---|
4157 | Cell = general['Cell'][1:7] |
---|
4158 | Supercell = RMCPdict['SuperCell'] |
---|
4159 | Trans = np.eye(3)*np.array(Supercell) |
---|
4160 | Cell = G2lat.TransformCell(Cell,Trans)[:6] |
---|
4161 | Amat,Bmat = G2lat.cell2AB(Cell) |
---|
4162 | indices = (-1,0,1) |
---|
4163 | Units = np.array([[h,k,l] for h in indices for k in indices for l in indices]) |
---|
4164 | for angle in angleList: |
---|
4165 | Oxyz = np.array(Atoms[angle[0]][1:]) |
---|
4166 | TAxyz = np.array([Atoms[tgt-1][1:] for tgt in angle[1].T[0]]) |
---|
4167 | TBxyz = np.array([Atoms[tgt-1][1:] for tgt in angle[1].T[1]]) |
---|
4168 | DAxV = np.inner(np.array([TAxyz-Oxyz+unit for unit in Units]),Amat) |
---|
4169 | DAx = np.sqrt(np.sum(DAxV**2,axis=2)) |
---|
4170 | DBxV = np.inner(np.array([TBxyz-Oxyz+unit for unit in Units]),Amat) |
---|
4171 | DBx = np.sqrt(np.sum(DBxV**2,axis=2)) |
---|
4172 | iDAx = np.argmin(DAx,axis=0) |
---|
4173 | iDBx = np.argmin(DBx,axis=0) |
---|
4174 | for i,[iA,iB] in enumerate(zip(iDAx,iDBx)): |
---|
4175 | DAv = DAxV[iA,i]/DAx[iA,i] |
---|
4176 | DBv = DBxV[iB,i]/DBx[iB,i] |
---|
4177 | bondAngles.append(npacosd(np.sum(DAv*DBv))) |
---|
4178 | return np.array(bondAngles) |
---|
4179 | |
---|
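# Hedged illustration (added): once the two nearest-image bond vectors are found,
# GetRMCAngles obtains the A-O-B angle from the dot product of the unit vectors,
# using the degree-based arccos defined near the top of this module. Toy example:
def _example_bond_angle():
    import numpy as np
    DAv = np.array([1.,0.,0.])                   # unit vector along O->A
    DBv = np.array([0.,1.,0.])                   # unit vector along O->B
    return npacosd(np.sum(DAv*DBv))              # 90.0 degrees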
4180 | def ISO2PDFfit(Phase): |
---|
4181 | ''' Creates new phase structure to be used for PDFfit from an ISODISTORT mode displacement phase. |
---|
4182 | It builds the distortion mode parameters to be used as PDFfit variables for atom displacements from |
---|
4183 | the original parent positions as transformed to the child cell with the symmetry defined by ISODISTORT. |
---|
4184 | |
---|
4185 | :param Phase: dict GSAS-II Phase structure; must contain ISODISTORT dict. NB: not accessed otherwise |
---|
4186 | |
---|
4187 | :returns: dict: GSAS-II Phase structure; will contain ['RMC']['PDFfit'] dict |
---|
4188 | ''' |
---|
4189 | |
---|
4190 | Trans = np.eye(3) |
---|
4191 | Uvec = np.zeros(3) |
---|
4192 | Vvec = np.zeros(3) |
---|
4193 | Phase = copy.deepcopy(Phase) |
---|
4194 | Atoms = Phase['Atoms'] |
---|
4195 | parentXYZ = Phase['ISODISTORT']['G2parentCoords'] #starting point for mode displacements |
---|
4196 | cx,ct,cs,cia = Phase['General']['AtomPtrs'] |
---|
4197 | for iat,atom in enumerate(Atoms): |
---|
4198 | atom[cx:cx+3] = parentXYZ[iat] |
---|
4199 | SGData = copy.deepcopy(Phase['General']['SGData']) |
---|
4200 | SGOps = SGData['SGOps'] |
---|
4201 | newPhase = copy.deepcopy(Phase) |
---|
4202 | newPhase['ranId'] = rand.randint(0,sys.maxsize) |
---|
4203 | newPhase['General']['Name'] += '_PDFfit' |
---|
4204 | newPhase['General']['SGData'] = G2spc.SpcGroup('P 1')[1] #this is for filled unit cell |
---|
4205 | newPhase,atCodes = G2lat.TransformPhase(Phase,newPhase,Trans,Uvec,Vvec,False) |
---|
4206 | newPhase['Histograms'] = {} |
---|
4207 | newPhase['Drawing'] = [] |
---|
4208 | Atoms = newPhase['Atoms'] |
---|
4209 | RMCPdict = newPhase['RMC']['PDFfit'] |
---|
4210 | ISOdict = newPhase['ISODISTORT'] |
---|
4211 | RMCPdict['AtomConstr'] = [] |
---|
4212 | RMCPdict['SGData'] = copy.deepcopy(SGData) #this is from the ISODISTORT child; defines PDFfit constraints |
---|
4213 | Norms = ISOdict['NormList'] |
---|
4214 | ModeMatrix = ISOdict['Mode2VarMatrix'] |
---|
4215 | RMCPdict['AtomVar'] = {'@%d'%(itm+21):val for itm,val in enumerate(ISOdict['modeDispl'])} |
---|
4216 | for iatm,[atom,atcode] in enumerate(zip(Atoms,atCodes)): |
---|
4217 | pid,opid = [int(item) for item in atcode.split(':')] |
---|
4218 | atmConstr = [atom[ct-1],atom[ct],'','','','','',atcode] |
---|
4219 | for ip,pname in enumerate(['%s_d%s'%(atom[ct-1],x) for x in ['x','y','z']]): |
---|
4220 | try: |
---|
4221 | conStr = '' |
---|
4222 | if Atoms[iatm][cx+ip]: |
---|
4223 | conStr += '%.5f'%Atoms[iatm][cx+ip] |
---|
4224 | pid = ISOdict['IsoVarList'].index(pname) |
---|
4225 | consVec = ModeMatrix[pid] |
---|
4226 | for ic,citm in enumerate(consVec): #NB: this assumes orthorhombic or lower symmetry |
---|
4227 | if opid < 0: |
---|
4228 | citm *= -SGOps[100-opid%100-1][0][ip][ip] #remove centering, if any |
---|
4229 | else: |
---|
4230 | citm *= SGOps[opid%100-1][0][ip][ip] |
---|
4231 | if citm > 0.: |
---|
4232 | conStr += '+%.5f*@%d'%(citm*Norms[ic],ic+21) |
---|
4233 | elif citm < 0.: |
---|
4234 | conStr += '%.5f*@%d'%(citm*Norms[ic],ic+21) |
---|
4235 | atmConstr[ip+2] = conStr |
---|
4236 | except ValueError: |
---|
4237 | atmConstr[ip+2] = '' |
---|
4238 | RMCPdict['AtomConstr'].append(atmConstr) |
---|
4239 | return newPhase |
---|
4240 | |
---|
4241 | def GetAtmDispList(ISOdata): |
---|
4242 | atmDispList = [] |
---|
4243 | MT = ISOdata['Mode2VarMatrix'].T |
---|
4244 | DispList = ISOdata['IsoVarList'] |
---|
4245 | N = len(DispList) |
---|
4246 | for I in range(N): |
---|
4247 | vary = [] |
---|
4248 | for i in range(N): |
---|
4249 | if MT[I,i]: |
---|
4250 | vary.append(DispList[i]) |
---|
4251 | atmDispList.append(vary) |
---|
4252 | return atmDispList |
---|
4253 | |
---|
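# Hedged illustration (added): GetAtmDispList simply lists, for each row of the
# transposed Mode2VarMatrix, which atomic displacement variables carry a non-zero
# coefficient. A minimal invented 3-variable / 3-mode case:
def _example_atm_disp_list():
    import numpy as np
    ISOdata = {'Mode2VarMatrix': np.array([[1.,0.,1.],
                                           [0.,1.,0.],
                                           [0.,0.,1.]]),
               'IsoVarList': ['Na1_dx','Na1_dy','Na1_dz']}
    return GetAtmDispList(ISOdata)
    # -> [['Na1_dx'], ['Na1_dy'], ['Na1_dx', 'Na1_dz']]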
4254 | #### Reflectometry calculations ################################################################################ |
---|
4255 | def REFDRefine(Profile,ProfDict,Inst,Limits,Substances,data): |
---|
4256 | G2fil.G2Print ('fit REFD data by '+data['Minimizer']+' using %.2f%% data resolution'%(data['Resolution'][0])) |
---|
4257 | |
---|
4258 | class RandomDisplacementBounds(object): |
---|
4259 | """random displacement with bounds""" |
---|
4260 | def __init__(self, xmin, xmax, stepsize=0.5): |
---|
4261 | self.xmin = xmin |
---|
4262 | self.xmax = xmax |
---|
4263 | self.stepsize = stepsize |
---|
4264 | |
---|
4265 | def __call__(self, x): |
---|
4266 | """take a random step but ensure the new position is within the bounds""" |
---|
4267 | while True: |
---|
4268 | # this could be done in a much more clever way, but it will work for example purposes |
---|
4269 | steps = self.xmax-self.xmin |
---|
4270 | xnew = x + np.random.uniform(-self.stepsize*steps, self.stepsize*steps, np.shape(x)) |
---|
4271 | if np.all(xnew < self.xmax) and np.all(xnew > self.xmin): |
---|
4272 | break |
---|
4273 | return xnew |
---|
4274 | |
---|
4275 | def GetModelParms(): |
---|
4276 | parmDict = {} |
---|
4277 | varyList = [] |
---|
4278 | values = [] |
---|
4279 | bounds = [] |
---|
4280 | parmDict['dQ type'] = data['dQ type'] |
---|
4281 | parmDict['Res'] = data['Resolution'][0]/(100.*sateln2) #% FWHM-->decimal sig |
---|
4282 | for parm in ['Scale','FltBack']: |
---|
4283 | parmDict[parm] = data[parm][0] |
---|
4284 | if data[parm][1]: |
---|
4285 | varyList.append(parm) |
---|
4286 | values.append(data[parm][0]) |
---|
4287 | bounds.append(Bounds[parm]) |
---|
4288 | parmDict['Layer Seq'] = np.array(['0',]+data['Layer Seq'].split()+[str(len(data['Layers'])-1),],dtype=int) |
---|
4289 | parmDict['nLayers'] = len(parmDict['Layer Seq']) |
---|
4290 | for ilay,layer in enumerate(data['Layers']): |
---|
4291 | name = layer['Name'] |
---|
4292 | cid = str(ilay)+';' |
---|
4293 | parmDict[cid+'Name'] = name |
---|
4294 | for parm in ['Thick','Rough','DenMul','Mag SLD','iDenMul']: |
---|
4295 | parmDict[cid+parm] = layer.get(parm,[0.,False])[0] |
---|
4296 | if layer.get(parm,[0.,False])[1]: |
---|
4297 | varyList.append(cid+parm) |
---|
4298 | value = layer[parm][0] |
---|
4299 | values.append(value) |
---|
4300 | if value: |
---|
4301 | bound = [value*Bfac,value/Bfac] |
---|
4302 | else: |
---|
4303 | bound = [0.,10.] |
---|
4304 | bounds.append(bound) |
---|
4305 | if name not in ['vacuum','unit scatter']: |
---|
4306 | parmDict[cid+'rho'] = Substances[name]['Scatt density'] |
---|
4307 | parmDict[cid+'irho'] = Substances[name].get('XImag density',0.) |
---|
4308 | return parmDict,varyList,values,bounds |
---|
4309 | |
---|
4310 | def SetModelParms(): |
---|
4311 | line = ' Refined parameters: Histogram scale: %.4g'%(parmDict['Scale']) |
---|
4312 | if 'Scale' in varyList: |
---|
4313 | data['Scale'][0] = parmDict['Scale'] |
---|
4314 | line += ' esd: %.4g'%(sigDict['Scale']) |
---|
4315 | G2fil.G2Print (line) |
---|
4316 | line = ' Flat background: %15.4g'%(parmDict['FltBack']) |
---|
4317 | if 'FltBack' in varyList: |
---|
4318 | data['FltBack'][0] = parmDict['FltBack'] |
---|
4319 | line += ' esd: %15.3g'%(sigDict['FltBack']) |
---|
4320 | G2fil.G2Print (line) |
---|
4321 | for ilay,layer in enumerate(data['Layers']): |
---|
4322 | name = layer['Name'] |
---|
4323 | G2fil.G2Print (' Parameters for layer: %d %s'%(ilay,name)) |
---|
4324 | cid = str(ilay)+';' |
---|
4325 | line = ' ' |
---|
4326 | line2 = ' Scattering density: Real %.5g'%(Substances[name]['Scatt density']*parmDict[cid+'DenMul']) |
---|
4327 | line2 += ' Imag %.5g'%(Substances[name].get('XImag density',0.)*parmDict[cid+'DenMul']) |
---|
4328 | for parm in ['Thick','Rough','DenMul','Mag SLD','iDenMul']: |
---|
4329 | if parm in layer: |
---|
4330 | if parm == 'Rough': |
---|
4331 | layer[parm][0] = abs(parmDict[cid+parm]) #make positive |
---|
4332 | else: |
---|
4333 | layer[parm][0] = parmDict[cid+parm] |
---|
4334 | line += ' %s: %.3f'%(parm,layer[parm][0]) |
---|
4335 | if cid+parm in varyList: |
---|
4336 | line += ' esd: %.3g'%(sigDict[cid+parm]) |
---|
4337 | G2fil.G2Print (line) |
---|
4338 | G2fil.G2Print (line2) |
---|
4339 | |
---|
4340 | def calcREFD(values,Q,Io,wt,Qsig,parmDict,varyList): |
---|
4341 | parmDict.update(zip(varyList,values)) |
---|
4342 | M = np.sqrt(wt)*(getREFD(Q,Qsig,parmDict)-Io) |
---|
4343 | return M |
---|
4344 | |
---|
4345 | def sumREFD(values,Q,Io,wt,Qsig,parmDict,varyList): |
---|
4346 | parmDict.update(zip(varyList,values)) |
---|
4347 | M = np.sqrt(wt)*(getREFD(Q,Qsig,parmDict)-Io) |
---|
4348 | return np.sum(M**2) |
---|
4349 | |
---|
4350 | def getREFD(Q,Qsig,parmDict): |
---|
4351 | Ic = np.ones_like(Q)*parmDict['FltBack'] |
---|
4352 | Scale = parmDict['Scale'] |
---|
4353 | Nlayers = parmDict['nLayers'] |
---|
4354 | Res = parmDict['Res'] |
---|
4355 | depth = np.zeros(Nlayers) |
---|
4356 | rho = np.zeros(Nlayers) |
---|
4357 | irho = np.zeros(Nlayers) |
---|
4358 | sigma = np.zeros(Nlayers) |
---|
4359 | for ilay,lay in enumerate(parmDict['Layer Seq']): |
---|
4360 | cid = str(lay)+';' |
---|
4361 | depth[ilay] = parmDict[cid+'Thick'] |
---|
4362 | sigma[ilay] = parmDict[cid+'Rough'] |
---|
4363 | if parmDict[cid+'Name'] == u'unit scatter': |
---|
4364 | rho[ilay] = parmDict[cid+'DenMul'] |
---|
4365 | irho[ilay] = parmDict[cid+'iDenMul'] |
---|
4366 | elif 'vacuum' != parmDict[cid+'Name']: |
---|
4367 | rho[ilay] = parmDict[cid+'rho']*parmDict[cid+'DenMul'] |
---|
4368 | irho[ilay] = parmDict[cid+'irho']*parmDict[cid+'DenMul'] |
---|
4369 | if cid+'Mag SLD' in parmDict: |
---|
4370 | rho[ilay] += parmDict[cid+'Mag SLD'] |
---|
4371 | if parmDict['dQ type'] == 'None': |
---|
4372 | AB = abeles(0.5*Q,depth,rho,irho,sigma[1:]) #Q --> k, offset roughness for abeles |
---|
4373 | elif 'const' in parmDict['dQ type']: |
---|
4374 | AB = SmearAbeles(0.5*Q,Q*Res,depth,rho,irho,sigma[1:]) |
---|
4375 | else: #dQ/Q in data |
---|
4376 | AB = SmearAbeles(0.5*Q,Qsig,depth,rho,irho,sigma[1:]) |
---|
4377 | Ic += AB*Scale |
---|
4378 | return Ic |
---|
4379 | |
---|
4380 | def estimateT0(takestep): |
---|
4381 | Mmax = -1.e-10 |
---|
4382 | Mmin = 1.e10 |
---|
4383 | for i in range(100): |
---|
4384 | x0 = takestep(values) |
---|
4385 | M = sumREFD(x0,Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList) |
---|
4386 | Mmin = min(M,Mmin) |
---|
4387 | Mmax = max(M,Mmax) |
---|
4388 | return 1.5*(Mmax-Mmin) |
---|
4389 | |
---|
4390 | Q,Io,wt,Ic,Ib,Qsig = Profile[:6] |
---|
4391 | if data.get('2% weight'): |
---|
4392 | wt = 1./(0.02*Io)**2 |
---|
4393 | Qmin = Limits[1][0] |
---|
4394 | Qmax = Limits[1][1] |
---|
4395 | wtFactor = ProfDict['wtFactor'] |
---|
4396 | Bfac = data['Toler'] |
---|
4397 | Ibeg = np.searchsorted(Q,Qmin) |
---|
4398 | Ifin = np.searchsorted(Q,Qmax)+1 #include last point |
---|
4399 | Ic[:] = 0 |
---|
4400 | Bounds = {'Scale':[data['Scale'][0]*Bfac,data['Scale'][0]/Bfac],'FltBack':[0.,1.e-6], |
---|
4401 | 'DenMul':[0.,1.],'Thick':[1.,500.],'Rough':[0.,10.],'Mag SLD':[-10.,10.],'iDenMul':[-1.,1.]} |
---|
4402 | parmDict,varyList,values,bounds = GetModelParms() |
---|
4403 | Msg = 'Failed to converge' |
---|
4404 | if varyList: |
---|
4405 | if data['Minimizer'] == 'LMLS': |
---|
4406 | result = so.leastsq(calcREFD,values,full_output=True,epsfcn=1.e-8,ftol=1.e-6, |
---|
4407 | args=(Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList)) |
---|
4408 | parmDict.update(zip(varyList,result[0])) |
---|
4409 | chisq = np.sum(result[2]['fvec']**2) |
---|
4410 | ncalc = result[2]['nfev'] |
---|
4411 | covM = result[1] |
---|
4412 | newVals = result[0] |
---|
4413 | elif data['Minimizer'] == 'Basin Hopping': |
---|
4414 | xyrng = np.array(bounds).T |
---|
4415 | take_step = RandomDisplacementBounds(xyrng[0], xyrng[1]) |
---|
4416 | T0 = estimateT0(take_step) |
---|
4417 | G2fil.G2Print (' Estimated temperature: %.3g'%(T0)) |
---|
4418 | result = so.basinhopping(sumREFD,values,take_step=take_step,disp=True,T=T0,stepsize=Bfac, |
---|
4419 | interval=20,niter=200,minimizer_kwargs={'method':'L-BFGS-B','bounds':bounds, |
---|
4420 | 'args':(Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList)}) |
---|
4421 | chisq = result.fun |
---|
4422 | ncalc = result.nfev |
---|
4423 | newVals = result.x |
---|
4424 | covM = [] |
---|
4425 | elif data['Minimizer'] == 'MC/SA Anneal': |
---|
4426 | xyrng = np.array(bounds).T |
---|
4427 | result = G2mth.anneal(sumREFD, values, |
---|
4428 | args=(Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList), |
---|
4429 | schedule='log', full_output=True,maxeval=None, maxaccept=None, maxiter=10,dwell=1000, |
---|
4430 | boltzmann=10.0, feps=1e-6,lower=xyrng[0], upper=xyrng[1], slope=0.9,ranStart=True, |
---|
4431 | ranRange=0.20,autoRan=False,dlg=None) |
---|
4432 | newVals = result[0] |
---|
4433 | parmDict.update(zip(varyList,newVals)) |
---|
4434 | chisq = result[1] |
---|
4435 | ncalc = result[3] |
---|
4436 | covM = [] |
---|
4437 | G2fil.G2Print (' MC/SA final temperature: %.4g'%(result[2])) |
---|
4438 | elif data['Minimizer'] == 'L-BFGS-B': |
---|
4439 | result = so.minimize(sumREFD,values,method='L-BFGS-B',bounds=bounds, #ftol=Ftol, |
---|
4440 | args=(Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList)) |
---|
4441 | parmDict.update(zip(varyList,result['x'])) |
---|
4442 | chisq = result.fun |
---|
4443 | ncalc = result.nfev |
---|
4444 | newVals = result.x |
---|
4445 | covM = [] |
---|
4446 | else: #nothing varied |
---|
4447 | M = calcREFD(values,Q[Ibeg:Ifin],Io[Ibeg:Ifin],wtFactor*wt[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict,varyList) |
---|
4448 | chisq = np.sum(M**2) |
---|
4449 | ncalc = 0 |
---|
4450 | covM = [] |
---|
4451 | sig = [] |
---|
4452 | sigDict = {} |
---|
4453 | result = [] |
---|
4454 | Rvals = {} |
---|
4455 | Rvals['Rwp'] = np.sqrt(chisq/np.sum(wt[Ibeg:Ifin]*Io[Ibeg:Ifin]**2))*100. #to % |
---|
4456 | Rvals['GOF'] = chisq/(Ifin-Ibeg-len(varyList)) #reduced chi^2 |
---|
4457 | Ic[Ibeg:Ifin] = getREFD(Q[Ibeg:Ifin],Qsig[Ibeg:Ifin],parmDict) |
---|
4458 | Ib[Ibeg:Ifin] = parmDict['FltBack'] |
---|
4459 | try: |
---|
4460 | if not len(varyList): |
---|
4461 | Msg += ' - nothing refined' |
---|
4462 | raise ValueError |
---|
4463 | Nans = np.isnan(newVals) |
---|
4464 | if np.any(Nans): |
---|
4465 | name = varyList[Nans.nonzero()[0][0]] |
---|
4466 | Msg += ' Nan result for '+name+'!' |
---|
4467 | raise ValueError |
---|
4468 | Negs = np.less_equal(newVals,0.) |
---|
4469 | if np.any(Negs): |
---|
4470 | indx = Negs.nonzero() |
---|
4471 | name = varyList[indx[0][0]] |
---|
4472 | if name != 'FltBack' and name.split(';')[1] in ['Thick',]: |
---|
4473 | Msg += ' negative coefficient for '+name+'!' |
---|
4474 | raise ValueError |
---|
4475 | if len(covM): |
---|
4476 | sig = np.sqrt(np.diag(covM)*Rvals['GOF']) |
---|
4477 | covMatrix = covM*Rvals['GOF'] |
---|
4478 | else: |
---|
4479 | sig = np.zeros(len(varyList)) |
---|
4480 | covMatrix = [] |
---|
4481 | sigDict = dict(zip(varyList,sig)) |
---|
4482 | G2fil.G2Print (' Results of reflectometry data modelling fit:') |
---|
4483 | G2fil.G2Print ('Number of function calls: %d Number of observations: %d Number of parameters: %d'%(ncalc,Ifin-Ibeg,len(varyList))) |
---|
4484 | G2fil.G2Print ('Rwp = %7.2f%%, chi**2 = %12.6g, reduced chi**2 = %6.2f'%(Rvals['Rwp'],chisq,Rvals['GOF'])) |
---|
4485 | SetModelParms() |
---|
4486 | return True,result,varyList,sig,Rvals,covMatrix,parmDict,'' |
---|
4487 | except (ValueError,TypeError): #when bad LS refinement; covM missing or with nans |
---|
4488 | G2fil.G2Print (Msg) |
---|
4489 | return False,0,0,0,0,0,0,Msg |
---|
4490 | |
---|
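# Hedged sketch (added): the RandomDisplacementBounds class defined inside REFDRefine
# is the usual way of giving scipy.optimize.basinhopping a bounded custom step. A
# self-contained miniature of the same pattern on a 1-D toy problem with invented
# bounds and objective:
def _example_bounded_basinhopping():
    import numpy as np
    import scipy.optimize as so
    xmin, xmax = np.array([-2.]), np.array([2.])
    def take_step(x):
        while True:
            xnew = x + np.random.uniform(-0.5, 0.5, np.shape(x))
            if np.all(xnew < xmax) and np.all(xnew > xmin):
                return xnew
    fxn = lambda x: (x[0]**2 - 1.)**2            # two minima at x = +/-1
    return so.basinhopping(fxn, [0.5], niter=50, take_step=take_step)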
4491 | def makeSLDprofile(data,Substances): |
---|
4492 | |
---|
4493 | sq2 = np.sqrt(2.) |
---|
4494 | laySeq = ['0',]+data['Layer Seq'].split()+[str(len(data['Layers'])-1),] |
---|
4495 | Nlayers = len(laySeq) |
---|
4496 | laySeq = np.array(laySeq,dtype=int) |
---|
4497 | interfaces = np.zeros(Nlayers) |
---|
4498 | rho = np.zeros(Nlayers) |
---|
4499 | sigma = np.zeros(Nlayers) |
---|
4500 | name = data['Layers'][0]['Name'] |
---|
4501 | thick = 0. |
---|
4502 | for ilay,lay in enumerate(laySeq): |
---|
4503 | layer = data['Layers'][lay] |
---|
4504 | name = layer['Name'] |
---|
4505 | if 'Thick' in layer: |
---|
4506 | thick += layer['Thick'][0] |
---|
4507 | interfaces[ilay] = layer['Thick'][0]+interfaces[ilay-1] |
---|
4508 | if 'Rough' in layer: |
---|
4509 | sigma[ilay] = max(0.001,layer['Rough'][0]) |
---|
4510 | if name != 'vacuum': |
---|
4511 | if name == 'unit scatter': |
---|
4512 | rho[ilay] = np.sqrt(layer['DenMul'][0]**2+layer['iDenMul'][0]**2) |
---|
4513 | else: |
---|
4514 | rrho = Substances[name]['Scatt density'] |
---|
4515 | irho = Substances[name].get('XImag density',0.) |
---|
4516 | rho[ilay] = np.sqrt(rrho**2+irho**2)*layer['DenMul'][0] |
---|
4517 | if 'Mag SLD' in layer: |
---|
4518 | rho[ilay] += layer['Mag SLD'][0] |
---|
4519 | name = data['Layers'][-1]['Name'] |
---|
4520 | x = np.linspace(-0.15*thick,1.15*thick,1000,endpoint=True) |
---|
4521 | xr = np.flipud(x) |
---|
4522 | interfaces[-1] = x[-1] |
---|
4523 | y = np.ones_like(x)*rho[0] |
---|
4524 | iBeg = 0 |
---|
4525 | for ilayer in range(Nlayers-1): |
---|
4526 | delt = rho[ilayer+1]-rho[ilayer] |
---|
4527 | iPos = np.searchsorted(x,interfaces[ilayer]) |
---|
4528 | y[iBeg:] += (delt/2.)*sp.erfc((interfaces[ilayer]-x[iBeg:])/(sq2*sigma[ilayer+1])) |
---|
4529 | iBeg = iPos |
---|
4530 | return x,xr,y |
---|
4531 | |
---|
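# Hedged illustration (added): each interface in makeSLDprofile is smoothed with a
# complementary error function, i.e. an SLD step of height delta centered at x0
# with roughness sigma contributes delta/2 * erfc((x0 - x)/(sqrt(2)*sigma)).
# A single-interface example with invented numbers:
def _example_sld_interface():
    import numpy as np
    import scipy.special as sp
    x = np.linspace(-20., 20., 401)          # Angstroms around the interface
    delta, x0, sigma = 3.5, 0., 4.0          # SLD step, position, roughness
    return (delta/2.)*sp.erfc((x0 - x)/(np.sqrt(2.)*sigma))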
4532 | def REFDModelFxn(Profile,Inst,Limits,Substances,data): |
---|
4533 | |
---|
4534 | Q,Io,wt,Ic,Ib,Qsig = Profile[:6] |
---|
4535 | Qmin = Limits[1][0] |
---|
4536 | Qmax = Limits[1][1] |
---|
4537 | iBeg = np.searchsorted(Q,Qmin) |
---|
4538 | iFin = np.searchsorted(Q,Qmax)+1 #include last point |
---|
4539 | Ib[:] = data['FltBack'][0] |
---|
4540 | Ic[:] = 0 |
---|
4541 | Scale = data['Scale'][0] |
---|
4542 | if data['Layer Seq'] == []: |
---|
4543 | return |
---|
4544 | laySeq = ['0',]+data['Layer Seq'].split()+[str(len(data['Layers'])-1),] |
---|
4545 | Nlayers = len(laySeq) |
---|
4546 | depth = np.zeros(Nlayers) |
---|
4547 | rho = np.zeros(Nlayers) |
---|
4548 | irho = np.zeros(Nlayers) |
---|
4549 | sigma = np.zeros(Nlayers) |
---|
4550 | for ilay,lay in enumerate(np.array(laySeq,dtype=int)): |
---|
4551 | layer = data['Layers'][lay] |
---|
4552 | name = layer['Name'] |
---|
4553 | if 'Thick' in layer: #skips first & last layers |
---|
4554 | depth[ilay] = layer['Thick'][0] |
---|
4555 | if 'Rough' in layer: #skips first layer |
---|
4556 | sigma[ilay] = layer['Rough'][0] |
---|
4557 | if 'unit scatter' == name: |
---|
4558 | rho[ilay] = layer['DenMul'][0] |
---|
4559 | irho[ilay] = layer['iDenMul'][0] |
---|
4560 | else: |
---|
4561 | rho[ilay] = Substances[name]['Scatt density']*layer['DenMul'][0] |
---|
4562 | irho[ilay] = Substances[name].get('XImag density',0.)*layer['DenMul'][0] |
---|
4563 | if 'Mag SLD' in layer: |
---|
4564 | rho[ilay] += layer['Mag SLD'][0] |
---|
4565 | if data['dQ type'] == 'None': |
---|
4566 | AB = abeles(0.5*Q[iBeg:iFin],depth,rho,irho,sigma[1:]) #Q --> k, offset roughness for abeles |
---|
4567 | elif 'const' in data['dQ type']: |
---|
4568 | res = data['Resolution'][0]/(100.*sateln2) |
---|
4569 | AB = SmearAbeles(0.5*Q[iBeg:iFin],res*Q[iBeg:iFin],depth,rho,irho,sigma[1:]) |
---|
4570 | else: #dQ/Q in data |
---|
4571 | AB = SmearAbeles(0.5*Q[iBeg:iFin],Qsig[iBeg:iFin],depth,rho,irho,sigma[1:]) |
---|
4572 | Ic[iBeg:iFin] = AB*Scale+Ib[iBeg:iFin] |
---|
4573 | |
---|
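# Hedged note (added): the 'const' dQ branch above (and parmDict['Res'] in
# REFDRefine) converts a resolution quoted as percent FWHM into a fractional
# Gaussian sigma by dividing by 100*sqrt(8*ln2), the sateln2 constant defined at
# the top of this module. A small worked check:
def _example_resolution_sigma(percentFWHM=2.0):
    import math
    return percentFWHM/(100.*math.sqrt(8.*math.log(2.)))   # ~0.0085 = sigma(Q)/Q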
4574 | def abeles(kz, depth, rho, irho=0, sigma=0): |
---|
4575 | """ |
---|
4576 | Optical matrix form of the reflectivity calculation. |
---|
4577 | O.S. Heavens, Optical Properties of Thin Solid Films |
---|
4578 | |
---|
4579 | Reflectometry as a function of kz for a set of slabs. |
---|
4580 | |
---|
4581 | :param kz: float[n] (1/Ang). Scattering vector, :math:`2\\pi\\sin(\\theta)/\\lambda`. |
---|
4582 | This is :math:`\\tfrac12 Q_z`. |
---|
4583 | :param depth: float[m] (Ang). |
---|
4584 | thickness of each layer. The thickness of the incident medium |
---|
4585 | and substrate are ignored. |
---|
4586 | :param rho: float[n,k] (1e-6/Ang^2) |
---|
4587 | Real scattering length density for each layer for each kz |
---|
4588 | :param irho: float[n,k] (1e-6/Ang^2) |
---|
4589 | Imaginary scattering length density for each layer for each kz |
---|
4590 | Note: absorption cross section mu = 2 irho/lambda for neutrons |
---|
4591 | :param sigma: float[m-1] (Ang) |
---|
4592 | interfacial roughness. This is the roughness between a layer |
---|
4593 | and the previous layer. The sigma array should have m-1 entries. |
---|
4594 | |
---|
4595 | Slabs are ordered with the surface SLD at index 0 and substrate at |
---|
4596 | index -1, or reversed if kz < 0. |
---|
4597 | """ |
---|
4598 | def calc(kz, depth, rho, irho, sigma): |
---|
4599 | if len(kz) == 0: return kz |
---|
4600 | |
---|
4601 | # Complex index of refraction is relative to the incident medium. |
---|
4602 | # We can get the same effect using kz_rel^2 = kz^2 + 4*pi*rho_o |
---|
4603 | # in place of kz^2, and ignoring rho_o |
---|
4604 | kz_sq = kz**2 + 4e-6*np.pi*rho[:,0] |
---|
4605 | k = kz |
---|
4606 | |
---|
4607 | # According to Heavens, the initial matrix should be [ 1 F; F 1], |
---|
4608 | # which we do by setting B=I and M0 to [1 F; F 1]. An extra matrix |
---|
4609 | # multiply versus some coding convenience. |
---|
4610 | B11 = 1 |
---|
4611 | B22 = 1 |
---|
4612 | B21 = 0 |
---|
4613 | B12 = 0 |
---|
4614 | for i in range(0, len(depth)-1): |
---|
4615 | k_next = np.sqrt(kz_sq - 4e-6*np.pi*(rho[:,i+1] + 1j*irho[:,i+1])) |
---|
4616 | F = (k - k_next) / (k + k_next) |
---|
4617 | F *= np.exp(-2*k*k_next*sigma[i]**2) |
---|
4618 | #print "==== layer",i |
---|
4619 | #print "kz:", kz |
---|
4620 | #print "k:", k |
---|
4621 | #print "k_next:",k_next |
---|
4622 | #print "F:",F |
---|
4623 | #print "rho:",rho[:,i+1] |
---|
4624 | #print "irho:",irho[:,i+1] |
---|
4625 | #print "d:",depth[i],"sigma:",sigma[i] |
---|
4626 | M11 = np.exp(1j*k*depth[i]) if i>0 else 1 |
---|
4627 | M22 = np.exp(-1j*k*depth[i]) if i>0 else 1 |
---|
4628 | M21 = F*M11 |
---|
4629 | M12 = F*M22 |
---|
4630 | C1 = B11*M11 + B21*M12 |
---|
4631 | C2 = B11*M21 + B21*M22 |
---|
4632 | B11 = C1 |
---|
4633 | B21 = C2 |
---|
4634 | C1 = B12*M11 + B22*M12 |
---|
4635 | C2 = B12*M21 + B22*M22 |
---|
4636 | B12 = C1 |
---|
4637 | B22 = C2 |
---|
4638 | k = k_next |
---|
4639 | |
---|
4640 | r = B12/B11 |
---|
4641 | return np.absolute(r)**2 |
---|
4642 | |
---|
4643 | if np.isscalar(kz): kz = np.asarray([kz], 'd') |
---|
4644 | |
---|
4645 | m = len(depth) |
---|
4646 | |
---|
4647 | # Make everything into arrays |
---|
4648 | depth = np.asarray(depth,'d') |
---|
4649 | rho = np.asarray(rho,'d') |
---|
4650 | irho = irho*np.ones_like(rho) if np.isscalar(irho) else np.asarray(irho,'d') |
---|
4651 | sigma = sigma*np.ones(m-1,'d') if np.isscalar(sigma) else np.asarray(sigma,'d') |
---|
4652 | |
---|
4653 | # Repeat rho,irho columns as needed |
---|
4654 | if len(rho.shape) == 1: |
---|
4655 | rho = rho[None,:] |
---|
4656 | irho = irho[None,:] |
---|
4657 | |
---|
4658 | return calc(kz, depth, rho, irho, sigma) |
---|
4659 | |
---|
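# Hedged usage sketch (added): abeles() expects kz = Q/2, per-layer thicknesses
# (the incident medium and substrate thicknesses are ignored) and SLDs, plus m-1
# interfacial roughnesses. A toy single film on a substrate, with invented values:
def _example_abeles_usage():
    import numpy as np
    kz = np.linspace(0.005, 0.15, 300)            # 1/Ang, i.e. Qz/2
    depth = [0., 120., 0.]                        # vacuum / 120 A film / substrate
    rho = [0., 4.5, 2.07]                         # SLDs, 1e-6/Ang^2
    sigma = [3., 5.]                              # roughness at each interface, Ang
    return abeles(kz, depth, rho, irho=0, sigma=sigma)   # reflectivity R(kz)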
4660 | def SmearAbeles(kz,dq, depth, rho, irho=0, sigma=0): |
---|
4661 | y = abeles(kz, depth, rho, irho, sigma) |
---|
4662 | s = dq/2. |
---|
4663 | y += 0.1354*(abeles(kz+2*s, depth, rho, irho, sigma)+abeles(kz-2*s, depth, rho, irho, sigma)) |
---|
4664 | y += 0.24935*(abeles(kz-5*s/3., depth, rho, irho, sigma)+abeles(kz+5*s/3., depth, rho, irho, sigma)) |
---|
4665 | y += 0.4111*(abeles(kz-4*s/3., depth, rho, irho, sigma)+abeles(kz+4*s/3., depth, rho, irho, sigma)) |
---|
4666 | y += 0.60653*(abeles(kz-s, depth, rho, irho, sigma) +abeles(kz+s, depth, rho, irho, sigma)) |
---|
4667 | y += 0.80074*(abeles(kz-2*s/3., depth, rho, irho, sigma)+abeles(kz+2*s/3., depth, rho, irho, sigma)) |
---|
4668 | y += 0.94596*(abeles(kz-s/3., depth, rho, irho, sigma)+abeles(kz+s/3., depth, rho, irho, sigma)) |
---|
4669 | y *= 0.137023 |
---|
4670 | return y |
---|
4671 | |
---|
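# Hedged usage sketch (added): SmearAbeles approximates a Gaussian dQ resolution by
# a fixed-weight sum of abeles() evaluations offset by multiples of s = dq/2.
# Comparing smeared and unsmeared curves for a toy film like the one sketched above
# (the 2% dQ/Q value is invented):
def _example_smeared_reflectivity():
    import numpy as np
    kz = np.linspace(0.005, 0.15, 300)
    depth, rho, sigma = [0., 120., 0.], [0., 4.5, 2.07], [3., 5.]
    dq = 0.02*2.*kz                               # constant 2% dQ/Q -> dQ array
    sharp = abeles(kz, depth, rho, 0, sigma)
    smeared = SmearAbeles(kz, dq, depth, rho, 0, sigma)
    return sharp, smeared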
4672 | def makeRefdFFT(Limits,Profile): |
---|
4673 | G2fil.G2Print ('make fft') |
---|
4674 | Q,Io = Profile[:2] |
---|
4675 | Qmin = Limits[1][0] |
---|
4676 | Qmax = Limits[1][1] |
---|
4677 | iBeg = np.searchsorted(Q,Qmin) |
---|
4678 | iFin = np.searchsorted(Q,Qmax)+1 #include last point |
---|
4679 | Qf = np.linspace(0.,Q[iFin-1],5000) |
---|
4680 | QI = si.interp1d(Q[iBeg:iFin],Io[iBeg:iFin],bounds_error=False,fill_value=0.0) |
---|
4681 | If = QI(Qf)*Qf**4 |
---|
4682 | R = np.linspace(0.,5000.,5000) |
---|
4683 | F = fft.rfft(If) |
---|
4684 | return R,F |
---|
4685 | |
---|
4686 | |
---|
4687 | #### Stacking fault simulation codes ################################################################################ |
---|
4688 | def GetStackParms(Layers): |
---|
4689 | |
---|
4690 | Parms = [] |
---|
4691 | #cell parms |
---|
4692 | if Layers['Laue'] in ['-3','-3m','4/m','4/mmm','6/m','6/mmm']: |
---|
4693 | Parms.append('cellA') |
---|
4694 | Parms.append('cellC') |
---|
4695 | else: |
---|
4696 | Parms.append('cellA') |
---|
4697 | Parms.append('cellB') |
---|
4698 | Parms.append('cellC') |
---|
4699 | if Layers['Laue'] != 'mmm': |
---|
4700 | Parms.append('cellG') |
---|
4701 | #Transition parms |
---|
4702 | for iY in range(len(Layers['Layers'])): |
---|
4703 | for iX in range(len(Layers['Layers'])): |
---|
4704 | Parms.append('TransP;%d;%d'%(iY,iX)) |
---|
4705 | Parms.append('TransX;%d;%d'%(iY,iX)) |
---|
4706 | Parms.append('TransY;%d;%d'%(iY,iX)) |
---|
4707 | Parms.append('TransZ;%d;%d'%(iY,iX)) |
---|
4708 | return Parms |
---|
4709 | |
---|
4710 | def StackSim(Layers,ctrls,scale=0.,background={},limits=[],inst={},profile=[]): |
---|
4711 | '''Simulate powder or selected area diffraction pattern from stacking faults using DIFFaX |
---|
4712 | |
---|
4713 | :param dict Layers: dict with following items |
---|
4714 | |
---|
4715 | :: |
---|
4716 | |
---|
4717 | {'Laue':'-1','Cell':[False,1.,1.,1.,90.,90.,90,1.], |
---|
4718 | 'Width':[[10.,10.],[False,False]],'Toler':0.01,'AtInfo':{}, |
---|
4719 | 'Layers':[],'Stacking':[],'Transitions':[]} |
---|
4720 | |
---|
4721 | :param str ctrls: controls string to be written on DIFFaX controls.dif file |
---|
4722 | :param float scale: scale factor |
---|
4723 | :param dict background: background parameters |
---|
4724 | :param list limits: min/max 2-theta to be calculated |
---|
4725 | :param dict inst: instrument parameters dictionary |
---|
4726 | :param list profile: powder pattern data |
---|
4727 | |
---|
4728 | Note that all parameters are updated in place. |
---|
4729 | ''' |
---|
4730 | import atmdata |
---|
4731 | path = sys.path |
---|
4732 | for name in path: |
---|
4733 | if 'bin' in name: |
---|
4734 | DIFFaX = name+'/DIFFaX.exe' |
---|
4735 | G2fil.G2Print (' Execute '+DIFFaX) |
---|
4736 | break |
---|
4737 | # make form factor file that DIFFaX wants - atom types are GSASII style |
---|
4738 | sf = open('data.sfc','w') |
---|
4739 | sf.write('GSASII special form factor file for DIFFaX\n\n') |
---|
4740 | atTypes = list(Layers['AtInfo'].keys()) |
---|
4741 | if 'H' not in atTypes: |
---|
4742 | atTypes.insert(0,'H') |
---|
4743 | for atType in atTypes: |
---|
4744 | if atType == 'H': |
---|
4745 | blen = -.3741 |
---|
4746 | else: |
---|
4747 | blen = Layers['AtInfo'][atType]['Isotopes']['Nat. Abund.']['SL'][0] |
---|
4748 | Adat = atmdata.XrayFF[atType] |
---|
4749 | text = '%4s'%(atType.ljust(4)) |
---|
4750 | for i in range(4): |
---|
4751 | text += '%11.6f%11.6f'%(Adat['fa'][i],Adat['fb'][i]) |
---|
4752 | text += '%11.6f%11.6f'%(Adat['fc'],blen) |
---|
4753 | text += '%3d\n'%(Adat['Z']) |
---|
4754 | sf.write(text) |
---|
4755 | sf.close() |
---|
4756 | #make DIFFaX control.dif file - future use GUI to set some of these flags |
---|
4757 | cf = open('control.dif','w') |
---|
4758 | if ctrls == '0\n0\n3\n' or ctrls == '0\n1\n3\n': |
---|
4759 | x0 = profile[0] |
---|
4760 | iBeg = np.searchsorted(x0,limits[0]) |
---|
4761 | iFin = np.searchsorted(x0,limits[1])+1 |
---|
4762 | if iFin-iBeg > 20000: |
---|
4763 | iFin = iBeg+20000 |
---|
4764 | Dx = (x0[iFin]-x0[iBeg])/(iFin-iBeg) |
---|
4765 | cf.write('GSASII-DIFFaX.dat\n'+ctrls) |
---|
4766 | cf.write('%.6f %.6f %.6f\n1\n1\nend\n'%(x0[iBeg],x0[iFin],Dx)) |
---|
4767 | else: |
---|
4768 | cf.write('GSASII-DIFFaX.dat\n'+ctrls) |
---|
4769 | inst = {'Type':['XSC','XSC',]} |
---|
4770 | cf.close() |
---|
4771 | #make DIFFaX data file |
---|
4772 | df = open('GSASII-DIFFaX.dat','w') |
---|
4773 | df.write('INSTRUMENTAL\n') |
---|
4774 | if 'X' in inst['Type'][0]: |
---|
4775 | df.write('X-RAY\n') |
---|
4776 | elif 'N' in inst['Type'][0]: |
---|
4777 | df.write('NEUTRON\n') |
---|
4778 | if ctrls == '0\n0\n3\n' or ctrls == '0\n1\n3\n': |
---|
4779 | df.write('%.4f\n'%(G2mth.getMeanWave(inst))) |
---|
4780 | U = ateln2*inst['U'][1]/10000. |
---|
4781 | V = ateln2*inst['V'][1]/10000. |
---|
4782 | W = ateln2*inst['W'][1]/10000. |
---|
4783 | HWHM = U*nptand(x0[iBeg:iFin]/2.)**2+V*nptand(x0[iBeg:iFin]/2.)+W |
---|
4784 | HW = np.sqrt(np.mean(HWHM)) |
---|
4785 | # df.write('PSEUDO-VOIGT 0.015 -0.0036 0.009 0.605 TRIM\n') |
---|
4786 | if 'Mean' in Layers['selInst']: |
---|
4787 | df.write('GAUSSIAN %.6f TRIM\n'%(HW)) #fast option - might not really matter |
---|
4788 | elif 'Gaussian' in Layers['selInst']: |
---|
4789 | df.write('GAUSSIAN %.6f %.6f %.6f TRIM\n'%(U,V,W)) #slow - make a GUI option? |
---|
4790 | else: |
---|
4791 | df.write('None\n') |
---|
4792 | else: |
---|
4793 | df.write('0.10\nNone\n') |
---|
4794 | df.write('STRUCTURAL\n') |
---|
4795 | a,b,c = Layers['Cell'][1:4] |
---|
4796 | gam = Layers['Cell'][6] |
---|
4797 | df.write('%.4f %.4f %.4f %.3f\n'%(a,b,c,gam)) |
---|
4798 | laue = Layers['Laue'] |
---|
4799 | if laue == '2/m(ab)': |
---|
4800 | laue = '2/m(1)' |
---|
4801 | elif laue == '2/m(c)': |
---|
4802 | laue = '2/m(2)' |
---|
4803 | if 'unknown' in Layers['Laue']: |
---|
4804 | df.write('%s %.3f\n'%(laue,Layers['Toler'])) |
---|
4805 | else: |
---|
4806 | df.write('%s\n'%(laue)) |
---|
4807 | df.write('%d\n'%(len(Layers['Layers']))) |
---|
4808 | if Layers['Width'][0][0] < 1. or Layers['Width'][0][1] < 1.: |
---|
4809 | df.write('%.1f %.1f\n'%(Layers['Width'][0][0]*10000.,Layers['Width'][0][1]*10000.)) #mum to A |
---|
4810 | layerNames = [] |
---|
4811 | for layer in Layers['Layers']: |
---|
4812 | layerNames.append(layer['Name']) |
---|
4813 | for il,layer in enumerate(Layers['Layers']): |
---|
4814 | if layer['SameAs']: |
---|
4815 | df.write('LAYER %d = %d\n'%(il+1,layerNames.index(layer['SameAs'])+1)) |
---|
4816 | continue |
---|
4817 | df.write('LAYER %d\n'%(il+1)) |
---|
4818 | if '-1' in layer['Symm']: |
---|
4819 | df.write('CENTROSYMMETRIC\n') |
---|
4820 | else: |
---|
4821 | df.write('NONE\n') |
---|
4822 | for ia,atom in enumerate(layer['Atoms']): |
---|
4823 | [name,atype,x,y,z,frac,Uiso] = atom |
---|
4824 | if '-1' in layer['Symm'] and [x,y,z] == [0.,0.,0.]: |
---|
4825 | frac /= 2. |
---|
4826 | df.write('%4s %3d %.5f %.5f %.5f %.4f %.2f\n'%(atype.ljust(6),ia,x,y,z,78.9568*Uiso,frac)) |
---|
4827 | df.write('STACKING\n') |
---|
4828 | df.write('%s\n'%(Layers['Stacking'][0])) |
---|
4829 | if 'recursive' in Layers['Stacking'][0]: |
---|
4830 | df.write('%s\n'%Layers['Stacking'][1]) |
---|
4831 | else: |
---|
4832 | if 'list' in Layers['Stacking'][1]: |
---|
4833 | Slen = len(Layers['Stacking'][2]) |
---|
4834 | iB = 0 |
---|
4835 | iF = 0 |
---|
4836 | while iB < Slen: #write the explicit stacking list in 68-character chunks, including the final partial chunk |
---|
4837 | iF = min(iB+68,Slen) |
---|
4838 | df.write('%s\n'%(Layers['Stacking'][2][iB:iF])) |
---|
4839 | iB = iF |
---|
4843 | else: |
---|
4844 | df.write('%s\n'%Layers['Stacking'][1]) |
---|
4845 | df.write('TRANSITIONS\n') |
---|
4846 | for iY in range(len(Layers['Layers'])): |
---|
4847 | sumPx = 0. |
---|
4848 | for iX in range(len(Layers['Layers'])): |
---|
4849 | p,dx,dy,dz = Layers['Transitions'][iY][iX][:4] |
---|
4850 | p = round(p,3) |
---|
4851 | df.write('%.3f %.5f %.5f %.5f\n'%(p,dx,dy,dz)) |
---|
4852 | sumPx += p |
---|
4853 | if sumPx != 1.0: #this has to be picky since DIFFaX is. |
---|
4854 | G2fil.G2Print ('ERROR - Layer probabilities sum to %.3f; DIFFaX requires that they sum to exactly 1.0'%sumPx) |
---|
4855 | df.close() |
---|
4856 | os.remove('data.sfc') |
---|
4857 | os.remove('control.dif') |
---|
4858 | os.remove('GSASII-DIFFaX.dat') |
---|
4859 | return |
---|
4860 | df.close() |
---|
4861 | time0 = time.time() |
---|
4862 | try: |
---|
4863 | subp.call(DIFFaX) |
---|
4864 | except OSError: |
---|
4865 | G2fil.G2Print('DIFFaX.exe is not available for this platform',mode='warn') |
---|
4866 | G2fil.G2Print (' DIFFaX time = %.2fs'%(time.time()-time0)) |
---|
4867 | if os.path.exists('GSASII-DIFFaX.spc'): |
---|
4868 | Xpat = np.loadtxt('GSASII-DIFFaX.spc').T |
---|
4869 | iFin = iBeg+Xpat.shape[1] |
---|
4870 | bakType,backDict,backVary = SetBackgroundParms(background) |
---|
4871 | backDict['Lam1'] = G2mth.getWave(inst) |
---|
4872 | profile[4][iBeg:iFin] = getBackground('',backDict,bakType,inst['Type'][0],profile[0][iBeg:iFin])[0] |
---|
4873 | profile[3][iBeg:iFin] = Xpat[-1]*scale+profile[4][iBeg:iFin] |
---|
4874 | if not np.any(profile[1]): #fill dummy data x,y,w,yc,yb,yd |
---|
4875 | rv = st.poisson(profile[3][iBeg:iFin]) |
---|
4876 | profile[1][iBeg:iFin] = rv.rvs() |
---|
4877 | Z = np.ones_like(profile[3][iBeg:iFin]) |
---|
4878 | Z[1::2] *= -1 |
---|
4879 | profile[1][iBeg:iFin] = profile[3][iBeg:iFin]+np.abs(profile[1][iBeg:iFin]-profile[3][iBeg:iFin])*Z |
---|
4880 | profile[2][iBeg:iFin] = np.where(profile[1][iBeg:iFin]>0.,1./profile[1][iBeg:iFin],1.0) |
---|
4881 | profile[5][iBeg:iFin] = profile[1][iBeg:iFin]-profile[3][iBeg:iFin] |
---|
4882 | #cleanup files.. |
---|
4883 | os.remove('GSASII-DIFFaX.spc') |
---|
4884 | elif os.path.exists('GSASII-DIFFaX.sadp'): |
---|
4885 | Sadp = np.fromfile('GSASII-DIFFaX.sadp','>u2') |
---|
4886 | Sadp = np.reshape(Sadp,(256,-1)) |
---|
4887 | Layers['Sadp']['Img'] = Sadp |
---|
4888 | os.remove('GSASII-DIFFaX.sadp') |
---|
4889 | os.remove('data.sfc') |
---|
4890 | os.remove('control.dif') |
---|
4891 | os.remove('GSASII-DIFFaX.dat') |
---|
4892 | |
---|
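# Illustrative sketch (values are made up): the GSASII-DIFFaX.dat file assembled by the
# writes in the function above follows the INSTRUMENTAL / STRUCTURAL / STACKING /
# TRANSITIONS layout shown below; consult the DIFFaX manual for the authoritative format.
#
#   INSTRUMENTAL
#   X-RAY                              <- or NEUTRON
#   1.5405                             <- mean wavelength (powder simulations only)
#   GAUSSIAN 0.012340 TRIM             <- or 'GAUSSIAN U V W TRIM', or 'None'
#   STRUCTURAL
#   3.0000 3.0000 7.0000 120.000       <- a, b, c, gamma
#   6/mmm                              <- Laue symmetry ('unknown' adds a tolerance)
#   2                                  <- number of layer types
#   LAYER 1
#   CENTROSYMMETRIC                    <- or NONE
#   C      0 0.00000 0.00000 0.00000 0.7896 1.00   <- type, #, x, y, z, B(=78.9568*Uiso), frac
#   LAYER 2 = 1                        <- 'SameAs' layers point back to an earlier layer
#   STACKING
#   recursive
#   infinite
#   TRANSITIONS
#   0.700 0.00000 0.00000 1.00000      <- one 'prob dx dy dz' row per layer pair
#   ...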
4893 | def SetPWDRscan(inst,limits,profile): |
---|
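'''Set up the pydiffax 2-theta scan for a powder pattern simulation: passes the mean
wavelength, the scan range taken from the data limits and the step size to
pyx.pygetinst, capping the scan at 20000 points.

:returns: the number of points in the scan
'''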
4894 | |
---|
4895 | wave = G2mth.getMeanWave(inst) |
---|
4896 | x0 = profile[0] |
---|
4897 | iBeg = np.searchsorted(x0,limits[0]) |
---|
4898 | iFin = np.searchsorted(x0,limits[1]) |
---|
4899 | if iFin-iBeg > 20000: |
---|
4900 | iFin = iBeg+20000 |
---|
4901 | Dx = (x0[iFin]-x0[iBeg])/(iFin-iBeg) |
---|
4902 | pyx.pygetinst(wave,x0[iBeg],x0[iFin],Dx) |
---|
4903 | return iFin-iBeg |
---|
4904 | |
---|
4905 | def SetStackingSF(Layers,debug): |
---|
4906 | # Load scattering factors into DIFFaX arrays |
---|
4907 | import atmdata |
---|
4908 | atTypes = Layers['AtInfo'].keys() |
---|
4909 | aTypes = [] |
---|
4910 | for atype in atTypes: |
---|
4911 | aTypes.append('%4s'%(atype.ljust(4))) |
---|
4912 | SFdat = [] |
---|
4913 | for atType in atTypes: |
---|
4914 | Adat = atmdata.XrayFF[atType] |
---|
4915 | SF = np.zeros(9) |
---|
4916 | SF[:8:2] = Adat['fa'] |
---|
4917 | SF[1:8:2] = Adat['fb'] |
---|
4918 | SF[8] = Adat['fc'] |
---|
4919 | SFdat.append(SF) |
---|
4920 | SFdat = np.array(SFdat) |
---|
4921 | pyx.pyloadscf(len(atTypes),aTypes,SFdat.T,debug) |
---|
4922 | |
---|
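# Illustrative sketch (not part of the DIFFaX interface): each 9-term SF row packed above
# holds the standard 4-Gaussian X-ray form-factor coefficients, a1..a4 in the even slots,
# b1..b4 in the odd slots and c in the last slot, so that
#   f(s) = sum_i a_i*exp(-b_i*s**2) + c,  with s = sin(theta)/lambda.
# _SFdemo evaluates that expansion for one packed row; it is a demo only and is never called.
def _SFdemo(SF,s):
    'return the X-ray form factor for packed coefficients SF at s = sin(theta)/lambda (demo only)'
    return np.sum(SF[:8:2]*np.exp(-SF[1:8:2]*s**2))+SF[8]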
4923 | def SetStackingClay(Layers,Type): |
---|
4924 | # Controls |
---|
4925 | rand.seed() |
---|
4926 | ranSeed = rand.randint(1,2**16-1) |
---|
4927 | try: |
---|
4928 | laueId = ['-1','2/m(ab)','2/m(c)','mmm','-3','-3m','4/m','4/mmm', |
---|
4929 | '6/m','6/mmm'].index(Layers['Laue'])+1 |
---|
4930 | except ValueError: #for 'unknown' |
---|
4931 | laueId = -1 |
---|
4932 | if 'SADP' in Type: |
---|
4933 | planeId = ['h0l','0kl','hhl','h-hl'].index(Layers['Sadp']['Plane'])+1 |
---|
4934 | lmax = int(Layers['Sadp']['Lmax']) |
---|
4935 | else: |
---|
4936 | planeId = 0 |
---|
4937 | lmax = 0 |
---|
4938 | # Sequences |
---|
4939 | StkType = ['recursive','explicit'].index(Layers['Stacking'][0]) |
---|
4940 | try: |
---|
4941 | StkParm = ['infinite','random','list'].index(Layers['Stacking'][1]) |
---|
4942 | except ValueError: |
---|
4943 | StkParm = -1 |
---|
4944 | if StkParm == 2: #list |
---|
4945 | StkSeq = [int(val) for val in Layers['Stacking'][2].split()] |
---|
4946 | Nstk = len(StkSeq) |
---|
4947 | else: |
---|
4948 | Nstk = 1 |
---|
4949 | StkSeq = [0,] |
---|
4950 | if StkParm == -1: |
---|
4951 | StkParm = int(Layers['Stacking'][1]) |
---|
4952 | Wdth = Layers['Width'][0] |
---|
4953 | mult = 1 |
---|
4954 | controls = [laueId,planeId,lmax,mult,StkType,StkParm,ranSeed] |
---|
4955 | LaueSym = Layers['Laue'].ljust(12) |
---|
4956 | pyx.pygetclay(controls,LaueSym,Wdth,Nstk,StkSeq) |
---|
4957 | return laueId,controls |
---|
4958 | |
---|
4959 | def SetCellAtoms(Layers): |
---|
4960 | Cell = Layers['Cell'][1:4]+Layers['Cell'][6:7] |
---|
4961 | # atoms in layers |
---|
4962 | atTypes = list(Layers['AtInfo'].keys()) |
---|
4963 | AtomXOU = [] |
---|
4964 | AtomTp = [] |
---|
4965 | LayerSymm = [] |
---|
4966 | LayerNum = [] |
---|
4967 | layerNames = [] |
---|
4968 | Natm = 0 |
---|
4969 | Nuniq = 0 |
---|
4970 | for layer in Layers['Layers']: |
---|
4971 | layerNames.append(layer['Name']) |
---|
4972 | for il,layer in enumerate(Layers['Layers']): |
---|
4973 | if layer['SameAs']: |
---|
4974 | LayerNum.append(layerNames.index(layer['SameAs'])+1) |
---|
4975 | continue |
---|
4976 | else: |
---|
4977 | LayerNum.append(il+1) |
---|
4978 | Nuniq += 1 |
---|
4979 | if '-1' in layer['Symm']: |
---|
4980 | LayerSymm.append(1) |
---|
4981 | else: |
---|
4982 | LayerSymm.append(0) |
---|
4983 | for ia,atom in enumerate(layer['Atoms']): |
---|
4984 | [name,atype,x,y,z,frac,Uiso] = atom |
---|
4985 | Natm += 1 |
---|
4986 | AtomTp.append('%4s'%(atype.ljust(4))) |
---|
4987 | Ta = atTypes.index(atype)+1 |
---|
4988 | AtomXOU.append([float(Nuniq),float(ia+1),float(Ta),x,y,z,frac,Uiso*78.9568]) |
---|
4989 | AtomXOU = np.array(AtomXOU) |
---|
4990 | Nlayers = len(layerNames) |
---|
4991 | pyx.pycellayer(Cell,Natm,AtomTp,AtomXOU.T,Nuniq,LayerSymm,Nlayers,LayerNum) |
---|
4992 | return Nlayers |
---|
4993 | |
---|
4994 | def SetStackingTrans(Layers,Nlayers): |
---|
4995 | # Transitions |
---|
4996 | TransX = [] |
---|
4997 | TransP = [] |
---|
4998 | for Ytrans in Layers['Transitions']: |
---|
4999 | TransP.append([trans[0] for trans in Ytrans]) #get just the numbers |
---|
5000 | TransX.append([trans[1:4] for trans in Ytrans]) #get just the numbers |
---|
5001 | TransP = np.array(TransP,dtype='float').T |
---|
5002 | TransX = np.array(TransX,dtype='float') |
---|
5003 | # GSASIIpath.IPyBreak() |
---|
5004 | pyx.pygettrans(Nlayers,TransP,TransX) |
---|
5005 | |
---|
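# Illustrative sketch: Layers['Transitions'] is an Nlayers x Nlayers nested list whose
# entries start with [prob, dx, dy, dz, ...]. The hypothetical two-layer example below
# shows how SetStackingTrans splits such a table into the probability matrix and the
# stacking-vector array passed to pydiffax; it is a demo only and is never called.
def _TransDemo():
    'build TransP & TransX for a made-up two-layer transition table (demo only)'
    Transitions = [[[0.7,0.,0.,1.],[0.3,0.5,0.5,1.]],
                   [[0.3,-0.5,-0.5,1.],[0.7,0.,0.,1.]]]
    TransP = np.array([[trans[0] for trans in row] for row in Transitions],dtype='float').T
    TransX = np.array([[trans[1:4] for trans in row] for row in Transitions],dtype='float')
    return TransP,TransX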
5006 | def CalcStackingPWDR(Layers,scale,background,limits,inst,profile,debug): |
---|
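'''Compute a stacking-fault powder pattern with pydiffax and store the scaled result,
plus background, in the profile arrays; simulated "observed" data are generated when
the pattern contains no observed intensities. The steps parallel the DIFFaX route
above: layer scattering factors, controls & sequences, cell/atoms, transitions,
then the scan and the selected instrumental broadening.
'''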
5007 | # Scattering factors |
---|
5008 | SetStackingSF(Layers,debug) |
---|
5009 | # Controls & sequences |
---|
5010 | laueId,controls = SetStackingClay(Layers,'PWDR') |
---|
5011 | # cell & atoms |
---|
5012 | Nlayers = SetCellAtoms(Layers) |
---|
5013 | Volume = Layers['Cell'][7] |
---|
5014 | # Transitions |
---|
5015 | SetStackingTrans(Layers,Nlayers) |
---|
5016 | # PWDR scan |
---|
5017 | Nsteps = SetPWDRscan(inst,limits,profile) |
---|
5018 | # result as Spec |
---|
5019 | x0 = profile[0] |
---|
5020 | profile[3] = np.zeros(len(profile[0])) |
---|
5021 | profile[4] = np.zeros(len(profile[0])) |
---|
5022 | profile[5] = np.zeros(len(profile[0])) |
---|
5023 | iBeg = np.searchsorted(x0,limits[0]) |
---|
5024 | iFin = np.searchsorted(x0,limits[1])+1 |
---|
5025 | if iFin-iBeg > 20000: |
---|
5026 | iFin = iBeg+20000 |
---|
5027 | Nspec = 20001 |
---|
5028 | spec = np.zeros(Nspec,dtype='double') |
---|
5029 | time0 = time.time() |
---|
5030 | pyx.pygetspc(controls,Nspec,spec) |
---|
5031 | G2fil.G2Print (' GETSPC time = %.2fs'%(time.time()-time0)) |
---|
5032 | time0 = time.time() |
---|
5033 | U = ateln2*inst['U'][1]/10000. |
---|
5034 | V = ateln2*inst['V'][1]/10000. |
---|
5035 | W = ateln2*inst['W'][1]/10000. |
---|
5036 | HWHM = U*nptand(x0[iBeg:iFin]/2.)**2+V*nptand(x0[iBeg:iFin]/2.)+W |
---|
5037 | HW = np.sqrt(np.mean(HWHM)) |
---|
5038 | BrdSpec = np.zeros(Nsteps) |
---|
5039 | if 'Mean' in Layers['selInst']: |
---|
5040 | pyx.pyprofile(U,V,W,HW,1,Nsteps,BrdSpec) |
---|
5041 | elif 'Gaussian' in Layers['selInst']: |
---|
5042 | pyx.pyprofile(U,V,W,HW,4,Nsteps,BrdSpec) |
---|
5043 | else: |
---|
5044 | BrdSpec = spec[:Nsteps] |
---|
5045 | BrdSpec /= Volume |
---|
5046 | iFin = iBeg+Nsteps |
---|
5047 | bakType,backDict,backVary = SetBackgroundParms(background) |
---|
5048 | backDict['Lam1'] = G2mth.getWave(inst) |
---|
5049 | profile[4][iBeg:iFin] = getBackground('',backDict,bakType,inst['Type'][0],profile[0][iBeg:iFin])[0] |
---|
5050 | profile[3][iBeg:iFin] = BrdSpec*scale+profile[4][iBeg:iFin] |
---|
5051 | if not np.any(profile[1]): #fill dummy data x,y,w,yc,yb,yd |
---|
5052 | try: |
---|
5053 | rv = st.poisson(profile[3][iBeg:iFin]) |
---|
5054 | profile[1][iBeg:iFin] = rv.rvs() |
---|
5055 | except ValueError: |
---|
5056 | profile[1][iBeg:iFin] = profile[3][iBeg:iFin] |
---|
5057 | Z = np.ones_like(profile[3][iBeg:iFin]) |
---|
5058 | Z[1::2] *= -1 |
---|
5059 | profile[1][iBeg:iFin] = profile[3][iBeg:iFin]+np.abs(profile[1][iBeg:iFin]-profile[3][iBeg:iFin])*Z |
---|
5060 | profile[2][iBeg:iFin] = np.where(profile[1][iBeg:iFin]>0.,1./profile[1][iBeg:iFin],1.0) |
---|
5061 | profile[5][iBeg:iFin] = profile[1][iBeg:iFin]-profile[3][iBeg:iFin] |
---|
5062 | G2fil.G2Print (' Broadening time = %.2fs'%(time.time()-time0)) |
---|
5063 | |
---|
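# Illustrative sketch: when the pattern being simulated has no observed intensities, the
# code above manufactures "observed" points by drawing Poisson counts about the calculated
# profile and then forcing the residuals to alternate in sign, which keeps the simulated
# data scattered evenly about the calculated curve. A minimal standalone version of that
# trick (demo only, never called; ycalc must be positive for the Poisson draw):
def _SimObsDemo(ycalc):
    'return simulated counts scattered about ycalc with sign-alternating residuals (demo only)'
    yobs = st.poisson(ycalc).rvs()
    Z = np.ones_like(ycalc)
    Z[1::2] *= -1
    return ycalc+np.abs(yobs-ycalc)*Z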
5064 | def CalcStackingSADP(Layers,debug): |
---|
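'''Compute a selected area diffraction pattern (SADP) for a stacking-fault model with
pydiffax and store the resulting 256x256 image in Layers['Sadp']['Img'].
'''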
5065 | |
---|
5066 | # Scattering factors |
---|
5067 | SetStackingSF(Layers,debug) |
---|
5068 | # Controls & sequences |
---|
5069 | laueId,controls = SetStackingClay(Layers,'SADP') |
---|
5070 | # cell & atoms |
---|
5071 | Nlayers = SetCellAtoms(Layers) |
---|
5072 | # Transitions |
---|
5073 | SetStackingTrans(Layers,Nlayers) |
---|
5074 | # result as Sadp |
---|
5075 | Nspec = 20001 |
---|
5076 | spec = np.zeros(Nspec,dtype='double') |
---|
5077 | time0 = time.time() |
---|
5078 | hkLim,Incr,Nblk = pyx.pygetsadp(controls,Nspec,spec) |
---|
5079 | Sapd = np.zeros((256,256)) |
---|
5080 | iB = 0 |
---|
5081 | for i in range(hkLim): |
---|
5082 | iF = iB+Nblk |
---|
5083 | p1 = 127+int(i*Incr) |
---|
5084 | p2 = 128-int(i*Incr) |
---|
5085 | if Nblk == 128: |
---|
5086 | if i: |
---|
5087 | Sapd[128:,p1] = spec[iB:iF] |
---|
5088 | Sapd[:128,p1] = spec[iF:iB:-1] |
---|
5089 | Sapd[128:,p2] = spec[iB:iF] |
---|
5090 | Sapd[:128,p2] = spec[iF:iB:-1] |
---|
5091 | else: |
---|
5092 | if i: |
---|
5093 | Sapd[:,p1] = spec[iB:iF] |
---|
5094 | Sapd[:,p2] = spec[iB:iF] |
---|
5095 | iB += Nblk |
---|
5096 | Layers['Sadp']['Img'] = Sapd |
---|
5097 | G2fil.G2Print (' GETSAD time = %.2fs'%(time.time()-time0)) |
---|
5098 | |
---|
5099 | #### Maximum Entropy Method - Dysnomia ############################################################################### |
---|
5100 | def makePRFfile(data,MEMtype): |
---|
5101 | ''' makes Dysnomia .prf control file from Dysnomia GUI controls |
---|
5102 | |
---|
5103 | :param dict data: GSAS-II phase data |
---|
5104 | :param int MEMtype: 1 for neutron data with negative scattering lengths |
---|
5105 | 0 otherwise |
---|
5106 | :returns str: name of Dysnomia control file |
---|
5107 | ''' |
---|
5108 | |
---|
5109 | generalData = data['General'] |
---|
5110 | pName = generalData['Name'].replace(' ','_') |
---|
5111 | DysData = data['Dysnomia'] |
---|
5112 | prfName = pName+'.prf' |
---|
5113 | prf = open(prfName,'w') |
---|
5114 | prf.write('$PREFERENCES\n') |
---|
5115 | prf.write(pName+'.mem\n') #or .fos? |
---|
5116 | prf.write(pName+'.out\n') |
---|
5117 | prf.write(pName+'.pgrid\n') |
---|
5118 | prf.write(pName+'.fba\n') |
---|
5119 | prf.write(pName+'_eps.raw\n') |
---|
5120 | prf.write('%d\n'%MEMtype) |
---|
5121 | if DysData['DenStart'] == 'uniform': |
---|
5122 | prf.write('0\n') |
---|
5123 | else: |
---|
5124 | prf.write('1\n') |
---|
5125 | if DysData['Optimize'] == 'ZSPA': |
---|
5126 | prf.write('0\n') |
---|
5127 | else: |
---|
5128 | prf.write('1\n') |
---|
5129 | prf.write('1\n') |
---|
5130 | if DysData['Lagrange'][0] == 'user': |
---|
5131 | prf.write('0\n') |
---|
5132 | else: |
---|
5133 | prf.write('1\n') |
---|
5134 | prf.write('%.4f %d\n'%(DysData['Lagrange'][1],DysData['wt pwr'])) |
---|
5135 | prf.write('%.3f\n'%DysData['Lagrange'][2]) |
---|
5136 | prf.write('%.2f\n'%DysData['E_factor']) |
---|
5137 | prf.write('1\n') |
---|
5138 | prf.write('0\n') |
---|
5139 | prf.write('%d\n'%DysData['Ncyc']) |
---|
5140 | prf.write('1\n') |
---|
5141 | prf.write('1 0 0 0 0 0 0 0\n') |
---|
5142 | if DysData['prior'] == 'uniform': |
---|
5143 | prf.write('0\n') |
---|
5144 | else: |
---|
5145 | prf.write('1\n') |
---|
5146 | prf.close() |
---|
5147 | return prfName |
---|
5148 | |
---|
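# Illustrative usage sketch with made-up Dysnomia settings (all values hypothetical); it
# shows the minimum content of the 'data' dict that makePRFfile reads. Note that calling
# it writes the .prf file into the current directory. Demo only, never called here.
def _makePRFdemo():
    'build a minimal phase data dict and write a demo Dysnomia .prf file (demo only)'
    data = {'General':{'Name':'demo phase'},
            'Dysnomia':{'DenStart':'uniform','Optimize':'ZSPA','Lagrange':['user',0.01,0.05],
                        'wt pwr':2,'E_factor':1.2,'Ncyc':100,'prior':'uniform'}}
    return makePRFfile(data,MEMtype=0)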
5149 | def makeMEMfile(data,reflData,MEMtype,DYSNOMIA): |
---|
5150 | ''' make Dysnomia .mem file of reflection data, etc. |
---|
5151 | |
---|
5152 | :param dict data: GSAS-II phase data |
---|
5153 | :param list reflData: GSAS-II reflection data |
---|
5154 | :param int MEMtype: 1 for neutron data with negative scattering lengths |
---|
5155 | 0 otherwise |
---|
5156 | :param str DYSNOMIA: path to dysnomia.exe |
---|
5157 | ''' |
---|
5158 | |
---|
5159 | DysData = data['Dysnomia'] |
---|
5160 | generalData = data['General'] |
---|
5161 | cell = generalData['Cell'][1:7] |
---|
5162 | A = G2lat.cell2A(cell) |
---|
5163 | SGData = generalData['SGData'] |
---|
5164 | pName = generalData['Name'].replace(' ','_') |
---|
5165 | memName = pName+'.mem' |
---|
5166 | Map = generalData['Map'] |
---|
5167 | Type = Map['Type'] |
---|
5168 | UseList = Map['RefList'] |
---|
5169 | mem = open(memName,'w') |
---|
5170 | mem.write('%s\n'%(generalData['Name']+' from '+UseList[0])) |
---|
5171 | a,b,c,alp,bet,gam = cell |
---|
5172 | mem.write('%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f\n'%(a,b,c,alp,bet,gam)) |
---|
5173 | mem.write(' 0.0000000 0.0000000 -1 0 0 0 P\n') #dummy PO stuff |
---|
5174 | SGSym = generalData['SGData']['SpGrp'] |
---|
5175 | try: |
---|
5176 | SGId = G2spc.spgbyNum.index(SGSym) |
---|
5177 | except ValueError: |
---|
5178 | return False |
---|
5179 | org = 1 |
---|
5180 | if SGSym in G2spc.spg2origins: |
---|
5181 | org = 2 |
---|
5182 | mapsize = Map['rho'].shape |
---|
5183 | sumZ = 0. |
---|
5184 | sumpos = 0. |
---|
5185 | sumneg = 0. |
---|
5186 | mem.write('%5d%5d%5d%5d%5d\n'%(SGId,org,mapsize[0],mapsize[1],mapsize[2])) |
---|
5187 | for atm in generalData['NoAtoms']: |
---|
5188 | Nat = generalData['NoAtoms'][atm] |
---|
5189 | AtInfo = G2elem.GetAtomInfo(atm) |
---|
5190 | sumZ += Nat*AtInfo['Z'] |
---|
5191 | isotope = generalData['Isotope'][atm] |
---|
5192 | blen = generalData['Isotopes'][atm][isotope]['SL'][0] |
---|
5193 | if blen < 0.: |
---|
5194 | sumneg += blen*Nat |
---|
5195 | else: |
---|
5196 | sumpos += blen*Nat |
---|
5197 | if 'X' in Type: |
---|
5198 | mem.write('%10.2f 0.001\n'%sumZ) |
---|
5199 | elif 'N' in Type and MEMtype: |
---|
5200 | mem.write('%10.3f%10.3f 0.001\n'%(sumpos,sumneg)) |
---|
5201 | else: |
---|
5202 | mem.write('%10.3f 0.001\n'%sumpos) |
---|
5203 | |
---|
5204 | dmin = DysData['MEMdmin'] |
---|
5205 | TOFlam = 2.0*dmin*npsind(80.0) |
---|
5206 | refSet = G2lat.GenHLaue(dmin,SGData,A) #list of h,k,l,d |
---|
5207 | refDict = {'%d %d %d'%(ref[0],ref[1],ref[2]):ref for ref in refSet} |
---|
5208 | |
---|
5209 | refs = [] |
---|
5210 | prevpos = 0. |
---|
5211 | for ref in reflData: |
---|
5212 | if ref[3] < 0: |
---|
5213 | continue |
---|
5214 | if 'T' in Type: |
---|
5215 | h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,x,x,x,x,prfo = ref[:16] |
---|
5216 | s = np.sqrt(max(sig,0.0001)) #var -> sig in deg |
---|
5217 | FWHM = getgamFW(gam,s) |
---|
5218 | if dsp < dmin: |
---|
5219 | continue |
---|
5220 | theta = npasind(TOFlam/(2.*dsp)) |
---|
5221 | FWHM *= nptand(theta)/pos |
---|
5222 | pos = 2.*theta |
---|
5223 | else: |
---|
5224 | h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,x,prfo = ref[:13] |
---|
5225 | g = gam/100. #centideg -> deg |
---|
5226 | s = np.sqrt(max(sig,0.0001))/100. #var -> sig in deg |
---|
5227 | FWHM = getgamFW(g,s) |
---|
5228 | delt = pos-prevpos |
---|
5229 | refs.append([h,k,l,mult,pos,FWHM,Fobs,phase,delt]) |
---|
5230 | prevpos = pos |
---|
5231 | |
---|
5232 | ovlp = DysData['overlap'] |
---|
5233 | refs1 = [] |
---|
5234 | refs2 = [] |
---|
5235 | nref2 = 0 |
---|
5236 | iref = 0 |
---|
5237 | Nref = len(refs) |
---|
5238 | start = False |
---|
5239 | while iref < Nref-1: |
---|
5240 | if refs[iref+1][-1] < ovlp*refs[iref][5]: |
---|
5241 | if refs[iref][-1] > ovlp*refs[iref][5]: |
---|
5242 | refs2.append([]) |
---|
5243 | start = True |
---|
5244 | if nref2 == len(refs2): |
---|
5245 | refs2.append([]) |
---|
5246 | refs2[nref2].append(refs[iref]) |
---|
5247 | else: |
---|
5248 | if start: |
---|
5249 | refs2[nref2].append(refs[iref]) |
---|
5250 | start = False |
---|
5251 | nref2 += 1 |
---|
5252 | else: |
---|
5253 | refs1.append(refs[iref]) |
---|
5254 | iref += 1 |
---|
5255 | if start: |
---|
5256 | refs2[nref2].append(refs[iref]) |
---|
5257 | else: |
---|
5258 | refs1.append(refs[iref]) |
---|
5259 | |
---|
5260 | mem.write('%5d\n'%len(refs1)) |
---|
5261 | for ref in refs1: |
---|
5262 | h,k,l = ref[:3] |
---|
5263 | hkl = '%d %d %d'%(h,k,l) |
---|
5264 | if hkl in refDict: |
---|
5265 | del refDict[hkl] |
---|
5266 | Fobs = np.sqrt(ref[6]) |
---|
5267 | mem.write('%5d%5d%5d%10.3f%10.3f%10.3f\n'%(h,k,l,Fobs*npcosd(ref[7]),Fobs*npsind(ref[7]),max(0.01*Fobs,0.1))) |
---|
5268 | while nref2: #drop any trailing empty overlap groups |
---|
5269 | if not len(refs2[-1]): |
---|
5270 | del refs2[-1] |
---|
5271 | else: |
---|
5272 | break |
---|
5273 | mem.write('%5d\n'%len(refs2)) |
---|
5274 | for iref2,ref2 in enumerate(refs2): |
---|
5275 | mem.write('#%5d\n'%iref2) |
---|
5276 | mem.write('%5d\n'%len(ref2)) |
---|
5277 | Gsum = 0. |
---|
5278 | Msum = 0 |
---|
5279 | for ref in ref2: |
---|
5280 | Gsum += ref[6]*ref[3] |
---|
5281 | Msum += ref[3] |
---|
5282 | G = np.sqrt(Gsum/Msum) |
---|
5283 | h,k,l = ref2[0][:3] |
---|
5284 | hkl = '%d %d %d'%(h,k,l) |
---|
5285 | if hkl in refDict: |
---|
5286 | del refDict[hkl] |
---|
5287 | mem.write('%5d%5d%5d%10.3f%10.3f%5d\n'%(h,k,l,G,max(0.01*G,0.1),ref2[0][3])) |
---|
5288 | for ref in ref2[1:]: |
---|
5289 | h,k,l,m = ref[:4] |
---|
5290 | mem.write('%5d%5d%5d%5d\n'%(h,k,l,m)) |
---|
5291 | hkl = '%d %d %d'%(h,k,l) |
---|
5292 | if hkl in refDict: |
---|
5293 | del refDict[hkl] |
---|
5294 | if len(refDict): |
---|
5295 | mem.write('%d\n'%len(refDict)) |
---|
5296 | for hkl in list(refDict.keys()): |
---|
5297 | h,k,l = refDict[hkl][:3] |
---|
5298 | mem.write('%5d%5d%5d\n'%(h,k,l)) |
---|
5299 | else: |
---|
5300 | mem.write('0\n') |
---|
5301 | mem.close() |
---|
5302 | return True |
---|
5303 | |
---|
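# Note on the grouping above: roughly speaking, a reflection is pulled into an overlapped
# group when the 2-theta gap to its neighbour (delt) is smaller than DysData['overlap']
# times the peak FWHM. Each isolated reflection is written to the .mem file with its
# structure-factor amplitude resolved into real and imaginary parts,
# sqrt(Fosq)*cos(phase) and sqrt(Fosq)*sin(phase), while each overlapped group is written
# as a single multiplicity-weighted amplitude
#   G = sqrt( sum_i(Fosq_i*mult_i) / sum_i(mult_i) ),
# e.g. two reflections with Fosq = 400 and 100 and multiplicities 6 and 12 give
# G = sqrt((400*6+100*12)/18) = sqrt(200), about 14.1.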
5304 | def MEMupdateReflData(prfName,data,reflData): |
---|
5305 | ''' Update reflection data with new Fosq, phase result from Dysnomia |
---|
5306 | |
---|
5307 | :param str prfName: phase control file name; its base name is used to locate the Dysnomia .fba output file |
---|
:param dict data: GSAS-II phase data |
---|
5308 | :param list reflData: GSAS-II reflection data |
---|
:returns: False if the .fba file is not found, otherwise (True, newRefs) with the updated reflection list |
---|
5309 | ''' |
---|
5310 | |
---|
5311 | generalData = data['General'] |
---|
5312 | Map = generalData['Map'] |
---|
5313 | Type = Map['Type'] |
---|
5314 | cell = generalData['Cell'][1:7] |
---|
5315 | A = G2lat.cell2A(cell) |
---|
5316 | reflDict = {} |
---|
5317 | newRefs = [] |
---|
5318 | for iref,ref in enumerate(reflData): |
---|
5319 | if ref[3] > 0: |
---|
5320 | newRefs.append(ref) |
---|
5321 | reflDict[hash('%5d%5d%5d'%(ref[0],ref[1],ref[2]))] = len(newRefs)-1 #index into newRefs, not reflData, in case reflections were skipped |
---|
5322 | fbaName = os.path.splitext(prfName)[0]+'.fba' |
---|
5323 | if os.path.isfile(fbaName): |
---|
5324 | fba = open(fbaName,'r') |
---|
5325 | else: |
---|
5326 | return False |
---|
5327 | fba.readline() |
---|
5328 | Nref = int(fba.readline()[:-1]) |
---|
5329 | fbalines = fba.readlines() |
---|
5330 | fba.close() |
---|
5331 | for line in fbalines[:Nref]: |
---|
5332 | info = line.split() |
---|
5333 | h = int(info[0]) |
---|
5334 | k = int(info[1]) |
---|
5335 | l = int(info[2]) |
---|
5336 | FoR = float(info[3]) |
---|
5337 | FoI = float(info[4]) |
---|
5338 | Fosq = FoR**2+FoI**2 |
---|
5339 | phase = npatan2d(FoI,FoR) |
---|
5340 | try: |
---|
5341 | refId = reflDict[hash('%5d%5d%5d'%(h,k,l))] |
---|
5342 | except KeyError: #added reflections at end skipped |
---|
5343 | d = float(1/np.sqrt(G2lat.calc_rDsq([h,k,l],A))) |
---|
5344 | if 'T' in Type: |
---|
5345 | newRefs.append([h,k,l,-1,d,0.,0.01,1.0,Fosq,Fosq,phase,1.0,1.0,1.0,1.0,1.0,1.0,1.0]) |
---|
5346 | else: |
---|
5347 | newRefs.append([h,k,l,-1,d,0.,0.01,1.0,Fosq,Fosq,phase,1.0,1.0,1.0,1.0]) |
---|
5348 | continue |
---|
5349 | newRefs[refId][8] = Fosq |
---|
5350 | newRefs[refId][10] = phase |
---|
5351 | newRefs = np.array(newRefs) |
---|
5352 | return True,newRefs |
---|
5353 | |
---|
5354 | #===Laue Fringe code =================================================================== |
---|
5355 | import NIST_profile as FP |
---|
5356 | |
---|
5357 | class profileObj(FP.FP_profile): |
---|
5358 | def conv_Lauefringe(self): |
---|
5359 | """Compute the FT of the Laue Fringe function""" |
---|
5360 | |
---|
5361 | me=self.get_function_name() #the name of this convolver,as a string |
---|
5362 | wave = self.param_dicts['conv_global']['dominant_wavelength']*1.e10 # in A |
---|
5363 | pos = np.rad2deg(self.param_dicts["conv_global"]["twotheta0"]) # peak position as 2theta in deg |
---|
5364 | posQ = np.pi * 4 * np.sin(self.param_dicts["conv_global"]["twotheta0"]/2) / wave # peak position as Q |
---|
5365 | ttwid = self.twotheta_window_fullwidth_deg |
---|
5366 | ncell = self.param_dicts[me]['Ncells'] |
---|
5367 | co2 = self.param_dicts[me]['clat'] / 2. |
---|
5368 | damp = self.param_dicts[me]['damp'] |
---|
5369 | asym = self.param_dicts[me]['asym'] |
---|
5370 | ttlist = np.linspace(pos-ttwid/2,pos+ttwid/2,len(self._epsb2)) |
---|
5371 | Qs = np.pi * 4 * np.sin(np.deg2rad(ttlist/2)) / wave |
---|
5372 | w = np.exp(-10**((damp-asym) * (Qs - posQ)**2)) |
---|
5373 | w2 = np.exp(-10**((damp+asym) * (Qs - posQ)**2)) |
---|
5374 | w[len(w)//2:] = w2[len(w)//2:] |
---|
5375 | weqdiv = w * np.sin(Qs * ncell * co2)**2 / (np.sin(Qs * co2)**2) |
---|
5376 | weqdiv[:np.searchsorted(Qs,posQ - np.pi/self.param_dicts[me]['clat'])] = 0 # isolate central peak, if needed |
---|
5377 | weqdiv[np.searchsorted(Qs,posQ + np.pi/self.param_dicts[me]['clat']):] = 0 |
---|
5378 | conv = FP.best_rfft(weqdiv) |
---|
5379 | conv[1::2] *= -1 #flip center |
---|
5380 | return conv |
---|
5381 | |
---|
5382 | def conv_Lorentzian(self): |
---|
5383 | """Compute the FT of a Lorentz function where gamma is the FWHM""" |
---|
5384 | ttwid = self.twotheta_window_fullwidth_deg |
---|
5385 | me=self.get_function_name() #the name of this convolver,as a string |
---|
5386 | g2gam = self.param_dicts[me]['g2gam'] # gsas-ii gamma in centidegrees |
---|
5387 | gamma = g2gam/100 # deg |
---|
5388 | ttlist = np.linspace(-ttwid/2,ttwid/2,len(self._epsb2)) |
---|
5389 | eqdiv = (0.5 * gamma / np.pi) / (gamma**2/4. + ttlist**2) |
---|
5390 | conv = FP.best_rfft(eqdiv) |
---|
5391 | conv[1::2] *= -1 #flip center |
---|
5392 | return conv |
---|
5393 | |
---|
5394 | def conv_Gaussian(self): |
---|
5395 | """Compute the FT of a Gaussian where sigma**2 is the variance""" |
---|
5396 | ttwid = self.twotheta_window_fullwidth_deg |
---|
5397 | me=self.get_function_name() #the name of this convolver,as a string |
---|
5398 | g2sig2 = self.param_dicts[me]['g2sig2'] # gsas-ii sigma**2 in centidegr**2 |
---|
5399 | sigma = math.sqrt(g2sig2)/100. |
---|
5400 | ttlist = np.linspace(-ttwid/2,ttwid/2,len(self._epsb2)) |
---|
5401 | eqdiv = np.exp(-0.5*ttlist**2/sigma**2) / math.sqrt(2*np.pi*sigma**2) |
---|
5402 | conv = FP.best_rfft(eqdiv) |
---|
5403 | conv[1::2] *= -1 #flip center |
---|
5404 | return conv |
---|
5405 | |
---|
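# Illustrative sketch of why the conv_* methods above return Fourier transforms: the FPA
# approach multiplies the transforms of the individual aberration functions and inverse
# transforms once to get the convolved peak (convolution theorem). The toy below convolves
# a Gaussian with a Lorentzian that way using plain numpy FFTs; it is independent of the
# NIST_profile machinery and is a demo only, never called here.
def _FTconvolveDemo(npts=1024,sig=3.0,gam=2.0):
    'convolve a Gaussian (sigma=sig) with a Lorentzian (FWHM=gam) via FFTs (demo only)'
    x = np.arange(npts)-npts//2
    gauss = np.exp(-0.5*(x/sig)**2)
    lor = (gam/2.)/np.pi/((gam/2.)**2+x**2)
    conv = np.fft.ifft(np.fft.fft(gauss)*np.fft.fft(lor)).real
    return np.fft.fftshift(conv)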
5406 | def LaueFringePeakCalc(ttArr,intArr,lam,peakpos,intens,sigma2,gamma,shol,ncells,clat,damp,asym,calcwid,plot=False): |
---|
5407 | '''Compute the peakshape for a Laue Fringe peak convoluted with a Gaussian, Lorentzian & |
---|
5408 | an axial divergence asymmetry correction. |
---|
5409 | |
---|
5410 | :param np.array ttArr: Array of two-theta values (in degrees) |
---|
5411 | :param np.array intArr: Array of intensity values (peaks are added to this) |
---|
5412 | :param float lam: wavelength in Angstrom |
---|
5413 | :param float peakpos: peak position in two-theta (deg.) |
---|
5414 | :param float intens: intensity factor for peak |
---|
5415 | :param float sigma2: Gaussian variance (in centidegrees**2) ** |
---|
5416 | :param float gamma: Lorenzian FWHM (in centidegrees) ** |
---|
5417 | :param float shol: FCJ (S + H)/L where S=sample-half height, H=slit half-height, L=radius ** |
---|
5418 | :param float ncells: number of unit cells in specular direction ** |
---|
5419 | :param float clat: c lattice parameter ** |
---|
5420 | :param float damp: damping factor for the Laue fringes; larger values suppress the fringes further from the peak |
---|
5421 | :param float asym: asymmetry of the fringe damping; positive values damp the high-angle side more strongly than the low-angle side |
---|
5422 | :param float calcwid: two-theta (deg.) width for cutoff of peak computation. |
---|
5423 | Defaults to 5 |
---|
5424 | :param bool plot: for debugging, shows contributions to peak |
---|
5425 | |
---|
5426 | ** If term is <= zero, item is removed from convolution |
---|
5427 | ''' |
---|
5428 | def LaueFringePeakPlot(ttArr,intArr): |
---|
5429 | import matplotlib.pyplot as plt |
---|
5430 | refColors = ['xkcd:blue','xkcd:red','xkcd:green','xkcd:cyan','xkcd:magenta','xkcd:black', |
---|
5431 | 'xkcd:pink','xkcd:brown','xkcd:teal','xkcd:orange','xkcd:grey','xkcd:violet',] |
---|
5432 | fig, ax = plt.subplots() |
---|
5433 | ax.set(title='Peak convolution functions @ 2theta={:.3f}'.format(peakpos), |
---|
5434 | xlabel=r'$\Delta 2\theta, deg$', |
---|
5435 | ylabel=r'Intensity (arbitrary)') |
---|
5436 | ax.set_yscale("log",nonpositive='mask') |
---|
5437 | ttmin = ttmax = 0 |
---|
5438 | for i,conv in enumerate(convList): |
---|
5439 | f = NISTpk.convolver_funcs[conv]() |
---|
5440 | if f is None: continue |
---|
5441 | FFT = FP.best_irfft(f) |
---|
5442 | if f[1].real > 0: FFT = np.roll(FFT,int(len(FFT)/2.)) |
---|
5443 | FFT /= FFT.max() |
---|
5444 | if i == 0: |
---|
5445 | tt = np.linspace(-NISTpk.twotheta_window_fullwidth_deg/2, |
---|
5446 | NISTpk.twotheta_window_fullwidth_deg/2,len(FFT)) |
---|
5447 | ttmin = min(ttmin,tt[np.argmax(FFT>.005)]) |
---|
5448 | ttmax = max(ttmax,tt[::-1][np.argmax(FFT[::-1]>.005)]) |
---|
5449 | color = refColors[i%len(refColors)] |
---|
5450 | ax.plot(tt,FFT,color,label=conv[5:]) |
---|
5451 | color = refColors[(i+1)%len(refColors)] |
---|
5452 | ax.plot(ttArr-peakpos,intArr/max(intArr),color,label='Convolution') |
---|
5453 | ax.set_xlim((ttmin,ttmax)) |
---|
5454 | ax.legend(loc='best') |
---|
5455 | plt.show() |
---|
5456 | # hardcoded constants |
---|
5457 | diffRadius = 220 # diffractometer radius in mm; needed for axial divergence, etc, but should not matter |
---|
5458 | axial_factor = 1.5 # fudge factor to bring sh/l broadening to ~ agree with FPA |
---|
5459 | equatorial_divergence_deg = 0.5 # not sure exactly what this impacts |
---|
5460 | NISTparms = { |
---|
5461 | "": { |
---|
5462 | 'equatorial_divergence_deg' : equatorial_divergence_deg, |
---|
5463 | 'dominant_wavelength' : 1.e-10 * lam, |
---|
5464 | 'diffractometer_radius' : 1e-3* diffRadius, # diffractometer radius in m |
---|
5465 | 'oversampling' : 8, |
---|
5466 | }, |
---|
5467 | "emission": { |
---|
5468 | 'emiss_wavelengths' : 1.e-10 * np.array([lam]), |
---|
5469 | 'emiss_intensities' : np.array([1.]), |
---|
5470 | 'emiss_gauss_widths' : 1.e-10 * 1.e-3 * np.array([0.001]), |
---|
5471 | 'emiss_lor_widths' : 1.e-10 * 1.e-3 * np.array([0.001]), |
---|
5472 | 'crystallite_size_gauss' : 1.e-9 * 1e6, |
---|
5473 | 'crystallite_size_lor' : 1.e-9 * 1e6 |
---|
5474 | }, |
---|
5475 | "axial": { |
---|
5476 | 'axDiv':"full", |
---|
5477 | 'slit_length_source' : 1e-3 * diffRadius * shol * axial_factor, |
---|
5478 | 'slit_length_target' : 1e-3 * diffRadius * shol * 1.00001 * axial_factor, # != 'slit_length_source' |
---|
5479 | 'length_sample' : 1e-3 * diffRadius * shol * axial_factor, |
---|
5480 | 'n_integral_points' : 10, |
---|
5481 | 'angI_deg' : 2.5, |
---|
5482 | 'angD_deg': 2.5, |
---|
5483 | }, |
---|
5484 | 'Gaussian': {'g2sig2': sigma2}, |
---|
5485 | 'Lorentzian': {'g2gam': gamma}, |
---|
5486 | 'Lauefringe': {'Ncells': ncells, 'clat':clat, 'damp': damp, 'asym': asym}, |
---|
5487 | } |
---|
5488 | NISTpk=profileObj(anglemode="twotheta", |
---|
5489 | output_gaussian_smoother_bins_sigma=1.0, |
---|
5490 | oversampling=NISTparms.get('oversampling',10)) |
---|
5491 | NISTpk.debug_cache=False |
---|
5492 | for key in NISTparms: #set parameters for each convolver |
---|
5493 | if key: |
---|
5494 | NISTpk.set_parameters(convolver=key,**NISTparms[key]) |
---|
5495 | else: |
---|
5496 | NISTpk.set_parameters(**NISTparms[key]) |
---|
5497 | # find closest point to peak location (which may be outside limits of the array) |
---|
5498 | center_bin_idx=min(ttArr.searchsorted(peakpos),len(ttArr)-1) |
---|
5499 | step = (ttArr[-1]-ttArr[0])/(len(ttArr)-1) |
---|
5500 | NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step, |
---|
5501 | twotheta_window_center_deg=ttArr[center_bin_idx], |
---|
5502 | twotheta_approx_window_fullwidth_deg=calcwid, |
---|
5503 | ) |
---|
5504 | NISTpk.set_parameters(twotheta0_deg=peakpos) |
---|
5505 | convList = ['conv_emission'] |
---|
5506 | if ncells: convList += ['conv_Lauefringe'] |
---|
5507 | if sigma2 > 0: convList += ['conv_Gaussian'] |
---|
5508 | if gamma > 0: convList += ['conv_Lorentzian'] |
---|
5509 | if shol > 0: convList += ['conv_axial'] |
---|
5510 | |
---|
5511 | # global deriv |
---|
5512 | # if deriv: |
---|
5513 | # peakObj = NISTpk.compute_line_profile(convolver_names=convList,compute_derivative=True) |
---|
5514 | # else: |
---|
5515 | # peakObj = NISTpk.compute_line_profile(convolver_names=convList) |
---|
5516 | peakObj = NISTpk.compute_line_profile(convolver_names=convList) |
---|
5517 | |
---|
5518 | pkPts = len(peakObj.peak) |
---|
5519 | pkMax = peakObj.peak.max() |
---|
5520 | startInd = center_bin_idx-(pkPts//2) |
---|
5521 | istart = None |
---|
5522 | pstart = None |
---|
5523 | iend = None |
---|
5524 | pend = None |
---|
5525 | # adjust data range if peak calc begins below data range or ends above data range |
---|
5526 | # but range of peak calc should not extend past both ends of ttArr |
---|
5527 | if startInd < 0: |
---|
5528 | iend = startInd+pkPts |
---|
5529 | pstart = -startInd |
---|
5530 | elif startInd > len(intArr): |
---|
5531 | return |
---|
5532 | elif startInd+pkPts >= len(intArr): |
---|
5533 | offset = pkPts - len( intArr[startInd:] ) |
---|
5534 | istart = startInd |
---|
5535 | iend = startInd+pkPts-offset |
---|
5536 | pend = -offset |
---|
5537 | else: |
---|
5538 | istart = startInd |
---|
5539 | iend = startInd+pkPts |
---|
5540 | intArr[istart:iend] += intens * peakObj.peak[pstart:pend]/pkMax |
---|
5541 | if plot: |
---|
5542 | LaueFringePeakPlot(ttArr[istart:iend], (intens * peakObj.peak[pstart:pend]/pkMax)) |
---|
5543 | |
---|
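# Illustrative usage sketch (all parameter values are arbitrary examples, not recommended
# settings): add a single Laue Fringe peak at 2theta = 20 deg to an intensity array.
# This needs the NIST_profile module imported above. Demo only, never called here.
def _LaueFringeDemo():
    'compute one Laue Fringe peak on a 15-25 deg 2theta grid (demo only)'
    ttArr = np.linspace(15.,25.,2000)
    intArr = np.zeros_like(ttArr)
    LaueFringePeakCalc(ttArr,intArr,1.5405,20.,1000.,4.,2.,0.002,
                       ncells=50,clat=7.0,damp=0.,asym=0.,calcwid=5.)
    return ttArr,intArr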
5544 | def LaueSatellite(peakpos,wave,c,ncell,j=[-4,-3,-2,-1,0,1,2,3,4]): |
---|
5545 | '''Returns the locations of the Laue satellite positions relative |
---|
5546 | to the peak position |
---|
5547 | |
---|
5548 | :param float peakpos: the peak position in degrees 2theta |
---|
:param float wave: the wavelength in Angstroms |
---|
:param float c: the lattice repeat (c lattice parameter) in Angstroms |
---|
5549 | :param float ncell: Laue fringe parameter, number of unit cells in layer |
---|
5550 | :param list j: the satellite order, where j=-1 is the first satellite |
---|
5551 | on the lower 2theta side and j=1 is the first satellite on the high |
---|
5552 | 2theta side. j=0 gives the peak position |
---|
:returns: np.array of satellite positions in degrees 2theta |
---|
5553 | ''' |
---|
5554 | Qpos = 4 * np.pi * np.sin(peakpos * np.pi / 360) / wave |
---|
5555 | dQvals = (2 * np.array(j) + np.sign(j)) * np.pi / (c * ncell) |
---|
5556 | return np.arcsin((Qpos+dQvals)*wave/(4*np.pi)) * (360 / np.pi) |
---|
5557 | |
---|
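# Illustrative example (hypothetical numbers): for a peak at 25 deg 2theta with a
# Cu Ka1-like wavelength of 1.5405 A, c = 7 A and ncell = 20, the first satellites come
# out close to 24.0 and 26.0 deg; the fringe spacing scales roughly as 1/(c*ncell).
#   sats = LaueSatellite(25.0,1.5405,7.0,20.,j=[-1,0,1])   # ~[24.0, 25.0, 26.0] deg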
5558 | #### testing data |
---|
5559 | NeedTestData = True |
---|
5560 | def TestData(): |
---|
5561 | 'define synthetic patterns & parameter dictionaries used by the tests below' |
---|
5562 | # global NeedTestData |
---|
5563 | global bakType |
---|
5564 | bakType = 'chebyschev' |
---|
5565 | global xdata |
---|
5566 | xdata = np.linspace(4.0,40.0,36000) |
---|
5567 | global parmDict0 |
---|
5568 | parmDict0 = { |
---|
5569 | 'pos0':5.6964,'int0':8835.8,'sig0':1.0,'gam0':1.0, |
---|
5570 | 'pos1':11.4074,'int1':3922.3,'sig1':1.0,'gam1':1.0, |
---|
5571 | 'pos2':20.6426,'int2':1573.7,'sig2':1.0,'gam2':1.0, |
---|
5572 | 'pos3':26.9568,'int3':925.1,'sig3':1.0,'gam3':1.0, |
---|
5573 | 'U':1.163,'V':-0.605,'W':0.093,'X':0.0,'Y':2.183,'Z':0.0,'SH/L':0.002, |
---|
5574 | 'Back0':5.384,'Back1':-0.015,'Back2':.004, |
---|
5575 | } |
---|
5576 | global parmDict1 |
---|
5577 | parmDict1 = { |
---|
5578 | 'pos0':13.4924,'int0':48697.6,'sig0':1.0,'gam0':1.0, |
---|
5579 | 'pos1':23.4360,'int1':43685.5,'sig1':1.0,'gam1':1.0, |
---|
5580 | 'pos2':27.1152,'int2':123712.6,'sig2':1.0,'gam2':1.0, |
---|
5581 | 'pos3':33.7196,'int3':65349.4,'sig3':1.0,'gam3':1.0, |
---|
5582 | 'pos4':36.1119,'int4':115829.8,'sig4':1.0,'gam4':1.0, |
---|
5583 | 'pos5':39.0122,'int5':6916.9,'sig5':1.0,'gam5':1.0, |
---|
5584 | 'U':22.75,'V':-17.596,'W':10.594,'X':1.577,'Y':5.778,'Z':0.0,'SH/L':0.002, |
---|
5585 | 'Back0':36.897,'Back1':-0.508,'Back2':.006, |
---|
5586 | 'Lam1':1.540500,'Lam2':1.544300,'I(L2)/I(L1)':0.5, |
---|
5587 | } |
---|
5588 | global parmDict2 |
---|
5589 | parmDict2 = { |
---|
5590 | 'pos0':5.7,'int0':1000.0,'sig0':0.5,'gam0':0.5, |
---|
5591 | 'U':2.,'V':-2.,'W':5.,'X':0.5,'Y':0.5,'Z':0.0,'SH/L':0.02, |
---|
5592 | 'Back0':5.,'Back1':-0.02,'Back2':.004, |
---|
5593 | # 'Lam1':1.540500,'Lam2':1.544300,'I(L2)/I(L1)':0.5, |
---|
5594 | } |
---|
5595 | global varyList |
---|
5596 | varyList = [] |
---|
5597 | |
---|
5598 | def test0(): |
---|
5599 | if NeedTestData: TestData() |
---|
5600 | gplot = plotter.add('FCJ-Voigt, 11BM').gca() |
---|
5601 | gplot.plot(xdata,getBackground('',parmDict0,bakType,'PXC',xdata)[0]) |
---|
5602 | gplot.plot(xdata,getPeakProfile(parmDict0,xdata,np.zeros_like(xdata),varyList,bakType)) |
---|
5603 | fplot = plotter.add('FCJ-Voigt, Ka1+2').gca() |
---|
5604 | fplot.plot(xdata,getBackground('',parmDict1,bakType,'PXC',xdata)[0]) |
---|
5605 | fplot.plot(xdata,getPeakProfile(parmDict1,xdata,np.zeros_like(xdata),varyList,bakType)) |
---|
5606 | |
---|
5607 | def test1(): |
---|
5608 | if NeedTestData: TestData() |
---|
5609 | time0 = time.time() |
---|
5610 | for i in range(100): |
---|
5611 | getPeakProfile(parmDict1,xdata,np.zeros_like(xdata),varyList,bakType) |
---|
5612 | G2fil.G2Print ('100+6*Ka1-2 peaks=1200 peaks %.2fs'%(time.time()-time0)) |
---|
5613 | |
---|
5614 | def test2(name,delt): |
---|
5615 | if NeedTestData: TestData() |
---|
5616 | varyList = [name,] |
---|
5617 | xdata = np.linspace(5.6,5.8,400) |
---|
5618 | hplot = plotter.add('derivatives test for '+name).gca() |
---|
5619 | hplot.plot(xdata,getPeakProfileDerv(parmDict2,xdata,np.zeros_like(xdata),varyList,bakType)[0]) |
---|
5620 | y0 = getPeakProfile(parmDict2,xdata,np.zeros_like(xdata),varyList,bakType) |
---|
5621 | parmDict2[name] += delt |
---|
5622 | y1 = getPeakProfile(parmDict2,xdata,np.zeros_like(xdata),varyList,bakType) |
---|
5623 | hplot.plot(xdata,(y1-y0)/delt,'r+') |
---|
5624 | |
---|
5625 | def test3(name,delt): |
---|
5626 | if NeedTestData: TestData() |
---|
5627 | names = ['pos','sig','gam','shl'] |
---|
5628 | idx = names.index(name) |
---|
5629 | myDict = {'pos':parmDict2['pos0'],'sig':parmDict2['sig0'],'gam':parmDict2['gam0'],'shl':parmDict2['SH/L']} |
---|
5630 | xdata = np.linspace(5.6,5.8,800) |
---|
5631 | dx = xdata[1]-xdata[0] |
---|
5632 | hplot = plotter.add('derivatives test for '+name).gca() |
---|
5633 | hplot.plot(xdata,100.*dx*getdFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata)[idx+1]) |
---|
5634 | y0 = getFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata)[0] |
---|
5635 | myDict[name] += delt |
---|
5636 | y1 = getFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata)[0] |
---|
5637 | hplot.plot(xdata,(y1-y0)/delt,'r+') |
---|
5638 | |
---|
5639 | if __name__ == '__main__': |
---|
5640 | import GSASIItestplot as plot |
---|
5641 | global plotter |
---|
5642 | plotter = plot.PlotNotebook() |
---|
5643 | # test0() |
---|
5644 | # for name in ['int0','pos0','sig0','gam0','U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)']: |
---|
5645 | for name,shft in [['int0',0.1],['pos0',0.0001],['sig0',0.01],['gam0',0.00001], |
---|
5646 | ['U',0.1],['V',0.01],['W',0.01],['X',0.0001],['Y',0.0001],['Z',0.0001],['SH/L',0.00005]]: |
---|
5647 | test2(name,shft) |
---|
5648 | for name,shft in [['pos',0.0001],['sig',0.01],['gam',0.0001],['shl',0.00005]]: |
---|
5649 | test3(name,shft) |
---|
5650 | G2fil.G2Print ("OK") |
---|
5651 | plotter.StartEventLoop() |
---|
5652 | |
---|
5653 | # GSASIIpath.SetBinaryPath(True,False) |
---|
5654 | # print('found',findfullrmc()) |
---|