#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*GSASII powder calculation module*
==================================

'''
########### SVN repository information ###################
# $Date: 2016-09-30 15:33:12 +0000 (Fri, 30 Sep 2016) $
# $Author: vondreele $
# $Revision: 2481 $
# $URL: trunk/GSASIIpwd.py $
# $Id: GSASIIpwd.py 2481 2016-09-30 15:33:12Z vondreele $
########### SVN repository information ###################
import sys
import math
import time
import os
import subprocess as subp

import numpy as np
import scipy as sp
import numpy.linalg as nl
import numpy.ma as ma
import random as rand
from numpy.fft import ifft, fft, fftshift
import scipy.interpolate as si
import scipy.stats as st
import scipy.optimize as so

import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 2481 $")
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIElem as G2elem
import GSASIIgrid as G2gd
import GSASIIIO as G2IO
import GSASIImath as G2mth
import pypowder as pyd
try:
    import pydiffax as pyx
except ImportError:
    print 'pydiffax is not available for this platform - under development'

# trig functions in degrees
tand = lambda x: math.tan(x*math.pi/180.)
atand = lambda x: 180.*math.atan(x)/math.pi
atan2d = lambda y,x: 180.*math.atan2(y,x)/math.pi
cosd = lambda x: math.cos(x*math.pi/180.)
acosd = lambda x: 180.*math.acos(x)/math.pi
rdsq2d = lambda x,p: round(1.0/math.sqrt(x),p)
#numpy versions
npsind = lambda x: np.sin(x*np.pi/180.)
npasind = lambda x: 180.*np.arcsin(x)/math.pi
npcosd = lambda x: np.cos(x*math.pi/180.)
npacosd = lambda x: 180.*np.arccos(x)/math.pi
nptand = lambda x: np.tan(x*math.pi/180.)
npatand = lambda x: 180.*np.arctan(x)/np.pi
npatan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
npT2stl = lambda tth, wave: 2.0*npsind(tth/2.0)/wave    #=d*
npT2q = lambda tth,wave: 2.0*np.pi*npT2stl(tth,wave)    #=2pi*d*
ateln2 = 8.0*math.log(2.0)

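# Quick reference for the helpers above (illustrative): the *d/np*d functions work in
# degrees, e.g. npsind(30.) ~ 0.5 and npT2q(tth,wave) = 4*pi*sin(tth/2)/wave.
# ateln2 = 8*ln(2) is the usual Gaussian constant: FWHM(G) = sqrt(8*ln2)*sigma ~ 2.35482*sigma.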
65 | #GSASII pdf calculation routines |
---|
66 | |
---|
67 | def Transmission(Geometry,Abs,Diam): |
---|
68 | ''' |
---|
69 | Calculate sample transmission |
---|
70 | |
---|
71 | :param str Geometry: one of 'Cylinder','Bragg-Brentano','Tilting flat plate in transmission','Fixed flat plate' |
---|
72 | :param float Abs: absorption coeff in cm-1 |
---|
73 | :param float Diam: sample thickness/diameter in mm |
---|
74 | ''' |
---|
75 | if 'Cylinder' in Geometry: #Lobanov & Alte da Veiga for 2-theta = 0; beam fully illuminates sample |
---|
76 | MuR = Abs*Diam/20.0 |
---|
77 | if MuR <= 3.0: |
---|
78 | T0 = 16/(3.*math.pi) |
---|
79 | T1 = -0.045780 |
---|
80 | T2 = -0.02489 |
---|
81 | T3 = 0.003045 |
---|
82 | T = -T0*MuR-T1*MuR**2-T2*MuR**3-T3*MuR**4 |
---|
83 | if T < -20.: |
---|
84 | return 2.06e-9 |
---|
85 | else: |
---|
86 | return math.exp(T) |
---|
87 | else: |
---|
88 | T1 = 1.433902 |
---|
89 | T2 = 0.013869+0.337894 |
---|
90 | T3 = 1.933433+1.163198 |
---|
91 | T4 = 0.044365-0.04259 |
---|
92 | T = (T1-T4)/(1.0+T2*(MuR-3.0))**T3+T4 |
---|
93 | return T/100. |
---|
94 | elif 'plate' in Geometry: |
---|
95 | MuR = Abs*Diam/10. |
---|
96 | return math.exp(-MuR) |
---|
97 | elif 'Bragg' in Geometry: |
---|
98 | return 0.0 |
---|
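# Illustrative use of Transmission (hypothetical numbers): a 1.0 mm capillary with an
# absorption coefficient of 2.5 cm-1 gives MuR = 2.5*1.0/20 = 0.125, so
#   Transmission('Cylinder',2.5,1.0)   # low-MuR branch, T ~ -0.211, returns exp(T) ~ 0.81
# For 'Bragg-Brentano' geometry the function simply returns 0.0.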
99 | |
---|
100 | def SurfaceRough(SRA,SRB,Tth): |
---|
101 | ''' Suortti (J. Appl. Cryst, 5,325-331, 1972) surface roughness correction |
---|
102 | :param float SRA: Suortti surface roughness parameter |
---|
103 | :param float SRB: Suortti surface roughness parameter |
---|
104 | :param float Tth: 2-theta(deg) - can be numpy array |
---|
105 | |
---|
106 | ''' |
---|
107 | sth = npsind(Tth/2.) |
---|
108 | T1 = np.exp(-SRB/sth) |
---|
109 | T2 = SRA+(1.-SRA)*np.exp(-SRB) |
---|
110 | return (SRA+(1.-SRA)*T1)/T2 |
---|
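# In closed form the Suortti correction above is
#   I_corr(2theta) = [SRA + (1-SRA)*exp(-SRB/sin(theta))] / [SRA + (1-SRA)*exp(-SRB)]
# i.e. it is normalized so that the correction -> 1 at 2-theta = 180 deg (sin(theta) = 1).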
111 | |
---|
112 | def SurfaceRoughDerv(SRA,SRB,Tth): |
---|
113 | ''' Suortti surface roughness correction derivatives |
---|
114 | :param float SRA: Suortti surface roughness parameter (dimensionless) |
---|
115 | :param float SRB: Suortti surface roughness parameter (dimensionless) |
---|
116 | :param float Tth: 2-theta(deg) - can be numpy array |
---|
117 | :return list: [dydSRA,dydSRB] derivatives to be used for intensity derivative |
---|
118 | ''' |
---|
119 | sth = npsind(Tth/2.) |
---|
120 | T1 = np.exp(-SRB/sth) |
---|
121 | T2 = SRA+(1.-SRA)*np.exp(-SRB) |
---|
122 | Trans = (SRA+(1.-SRA)*T1)/T2 |
---|
123 | dydSRA = ((1.-T1)*T2-(1.-np.exp(-SRB))*Trans)/T2**2 |
---|
124 | dydSRB = ((SRA-1.)*T1*T2/sth-Trans*(SRA-T2))/T2**2 |
---|
125 | return [dydSRA,dydSRB] |
---|
126 | |
---|
127 | def Absorb(Geometry,MuR,Tth,Phi=0,Psi=0): |
---|
128 | '''Calculate sample absorption |
---|
129 | :param str Geometry: one of 'Cylinder','Bragg-Brentano','Tilting Flat Plate in transmission','Fixed flat plate' |
---|
130 | :param float MuR: absorption coeff * sample thickness/2 or radius |
---|
131 | :param Tth: 2-theta scattering angle - can be numpy array |
---|
132 | :param float Phi: flat plate tilt angle - future |
---|
133 | :param float Psi: flat plate tilt axis - future |
---|
134 | ''' |
---|
135 | |
---|
136 | def muRunder3(MuR,Sth2): |
---|
137 | T0 = 16.0/(3.*np.pi) |
---|
138 | T1 = (25.99978-0.01911*Sth2**0.25)*np.exp(-0.024551*Sth2)+ \ |
---|
139 | 0.109561*np.sqrt(Sth2)-26.04556 |
---|
140 | T2 = -0.02489-0.39499*Sth2+1.219077*Sth2**1.5- \ |
---|
141 | 1.31268*Sth2**2+0.871081*Sth2**2.5-0.2327*Sth2**3 |
---|
142 | T3 = 0.003045+0.018167*Sth2-0.03305*Sth2**2 |
---|
143 | Trns = -T0*MuR-T1*MuR**2-T2*MuR**3-T3*MuR**4 |
---|
144 | return np.exp(Trns) |
---|
145 | |
---|
146 | def muRover3(MuR,Sth2): |
---|
147 | T1 = 1.433902+11.07504*Sth2-8.77629*Sth2*Sth2+ \ |
---|
148 | 10.02088*Sth2**3-3.36778*Sth2**4 |
---|
149 | T2 = (0.013869-0.01249*Sth2)*np.exp(3.27094*Sth2)+ \ |
---|
150 | (0.337894+13.77317*Sth2)/(1.0+11.53544*Sth2)**1.555039 |
---|
151 | T3 = 1.933433/(1.0+23.12967*Sth2)**1.686715- \ |
---|
152 | 0.13576*np.sqrt(Sth2)+1.163198 |
---|
153 | T4 = 0.044365-0.04259/(1.0+0.41051*Sth2)**148.4202 |
---|
154 | Trns = (T1-T4)/(1.0+T2*(MuR-3.0))**T3+T4 |
---|
155 | return Trns/100. |
---|
156 | |
---|
157 | Sth2 = npsind(Tth/2.0)**2 |
---|
158 | Cth2 = 1.-Sth2 |
---|
159 | if 'Cylinder' in Geometry: #Lobanov & Alte da Veiga for 2-theta = 0; beam fully illuminates sample |
---|
160 | if 'array' in str(type(MuR)): |
---|
161 | MuRSTh2 = np.concatenate((MuR,Sth2)) |
---|
162 | AbsCr = np.where(MuRSTh2[0]<=3.0,muRunder3(MuRSTh2[0],MuRSTh2[1]),muRover3(MuRSTh2[0],MuRSTh2[1])) |
---|
163 | return AbsCr |
---|
164 | else: |
---|
165 | if MuR <= 3.0: |
---|
166 | return muRunder3(MuR,Sth2) |
---|
167 | else: |
---|
168 | return muRover3(MuR,Sth2) |
---|
169 | elif 'Bragg' in Geometry: |
---|
170 | return 1.0 |
---|
171 | elif 'Fixed' in Geometry: #assumes sample plane is perpendicular to incident beam |
---|
172 | # and only defined for 2theta < 90 |
---|
173 | MuT = 2.*MuR |
---|
174 | T1 = np.exp(-MuT) |
---|
175 | T2 = np.exp(-MuT/npcosd(Tth)) |
---|
176 | Tb = MuT-MuT/npcosd(Tth) |
---|
177 | return (T2-T1)/Tb |
---|
178 | elif 'Tilting' in Geometry: #assumes symmetric tilt so sample plane is parallel to diffraction vector |
---|
179 | MuT = 2.*MuR |
---|
180 | cth = npcosd(Tth/2.0) |
---|
181 | return np.exp(-MuT/cth)/cth |
---|
182 | |
---|
183 | def AbsorbDerv(Geometry,MuR,Tth,Phi=0,Psi=0): |
---|
184 | 'needs a doc string' |
---|
185 | dA = 0.001 |
---|
186 | AbsP = Absorb(Geometry,MuR+dA,Tth,Phi,Psi) |
---|
187 | if MuR: |
---|
188 | AbsM = Absorb(Geometry,MuR-dA,Tth,Phi,Psi) |
---|
189 | return (AbsP-AbsM)/(2.0*dA) |
---|
190 | else: |
---|
191 | return (AbsP-1.)/dA |
---|
192 | |
---|
193 | def Polarization(Pola,Tth,Azm=0.0): |
---|
194 | """ Calculate angle dependent x-ray polarization correction (not scaled correctly!) |
---|
195 | |
---|
196 | :param Pola: polarization coefficient e.g 1.0 fully polarized, 0.5 unpolarized |
---|
197 | :param Azm: azimuthal angle e.g. 0.0 in plane of polarization |
---|
198 | :param Tth: 2-theta scattering angle - can be numpy array |
---|
199 | which (if either) of these is "right"? |
---|
200 | :return: (pola, dpdPola) |
---|
201 | * pola = ((1-Pola)*npcosd(Azm)**2+Pola*npsind(Azm)**2)*npcosd(Tth)**2+ \ |
---|
202 | (1-Pola)*npsind(Azm)**2+Pola*npcosd(Azm)**2 |
---|
203 | * dpdPola: derivative needed for least squares |
---|
204 | |
---|
205 | """ |
---|
206 | pola = ((1.0-Pola)*npcosd(Azm)**2+Pola*npsind(Azm)**2)*npcosd(Tth)**2+ \ |
---|
207 | (1.0-Pola)*npsind(Azm)**2+Pola*npcosd(Azm)**2 |
---|
208 | dpdPola = -npsind(Tth)**2*(npsind(Azm)**2-npcosd(Azm)**2) |
---|
209 | return pola,dpdPola |
---|
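# Sanity check: for an unpolarized beam (Pola = 0.5) the expression above reduces to
# 0.5*npcosd(Tth)**2 + 0.5 = (1 + cos(2theta)**2)/2 for any Azm, the familiar
# unpolarized Thomson polarization factor.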
210 | |
---|
211 | def Oblique(ObCoeff,Tth): |
---|
212 | 'currently assumes detector is normal to beam' |
---|
213 | if ObCoeff: |
---|
214 | return (1.-ObCoeff)/(1.0-np.exp(np.log(ObCoeff)/npcosd(Tth))) |
---|
215 | else: |
---|
216 | return 1.0 |
---|
217 | |
---|
218 | def Ruland(RulCoff,wave,Q,Compton): |
---|
219 | 'needs a doc string' |
---|
220 | C = 2.9978e8 |
---|
221 | D = 1.5e-3 |
---|
222 | hmc = 0.024262734687 |
---|
223 | sinth2 = (Q*wave/(4.0*np.pi))**2 |
---|
224 | dlam = (wave**2)*Compton*Q/C |
---|
225 | dlam_c = 2.0*hmc*sinth2-D*wave**2 |
---|
226 | return 1.0/((1.0+dlam/RulCoff)*(1.0+(np.pi*dlam_c/(dlam+RulCoff))**2)) |
---|
227 | |
---|
228 | def LorchWeight(Q): |
---|
    '''Lorch-type window function: weights fall from 1 at Q = 0 to 0 at Q = Q[-1],
    used to damp Fourier truncation ripples in the PDF transform.

    :param Q: np.array of Q values; Q[-1] is taken as the Q limit
    '''
230 | return np.sin(np.pi*(Q[-1]-Q)/(2.0*Q[-1])) |
---|
231 | |
---|
232 | def GetAsfMean(ElList,Sthl2): |
---|
233 | '''Calculate various scattering factor terms for PDF calcs |
---|
234 | |
---|
235 | :param dict ElList: element dictionary contains scattering factor coefficients, etc. |
---|
236 | :param np.array Sthl2: numpy array of sin theta/lambda squared values |
---|
237 | :returns: mean(f^2), mean(f)^2, mean(compton) |
---|
238 | ''' |
---|
239 | sumNoAtoms = 0.0 |
---|
240 | FF = np.zeros_like(Sthl2) |
---|
241 | FF2 = np.zeros_like(Sthl2) |
---|
242 | CF = np.zeros_like(Sthl2) |
---|
243 | for El in ElList: |
---|
244 | sumNoAtoms += ElList[El]['FormulaNo'] |
---|
245 | for El in ElList: |
---|
246 | el = ElList[El] |
---|
247 | ff2 = (G2elem.ScatFac(el,Sthl2)+el['fp'])**2+el['fpp']**2 |
---|
248 | cf = G2elem.ComptonFac(el,Sthl2) |
---|
249 | FF += np.sqrt(ff2)*el['FormulaNo']/sumNoAtoms |
---|
250 | FF2 += ff2*el['FormulaNo']/sumNoAtoms |
---|
251 | CF += cf*el['FormulaNo']/sumNoAtoms |
---|
252 | return FF2,FF**2,CF |
---|
253 | |
---|
254 | def GetNumDensity(ElList,Vol): |
---|
    '''Compute the number density: the sum of 'FormulaNo' over all elements in ElList
    divided by Vol (atoms per unit volume, e.g. atoms/A^3 if Vol is in A^3).
    '''
256 | sumNoAtoms = 0.0 |
---|
257 | for El in ElList: |
---|
258 | sumNoAtoms += ElList[El]['FormulaNo'] |
---|
259 | return sumNoAtoms/Vol |
---|
260 | |
---|
261 | def CalcPDF(data,inst,limits,xydata): |
---|
262 | 'needs a doc string' |
---|
263 | auxPlot = [] |
---|
264 | import copy |
---|
265 | import scipy.fftpack as ft |
---|
266 | Ibeg = np.searchsorted(xydata['Sample'][1][0],limits[0]) |
---|
267 | Ifin = np.searchsorted(xydata['Sample'][1][0],limits[1]) |
---|
268 | #subtract backgrounds - if any & use PWDR limits |
---|
269 | # GSASIIpath.IPyBreak() |
---|
270 | xydata['IofQ'] = copy.deepcopy(xydata['Sample']) |
---|
271 | xydata['IofQ'][1] = np.array(xydata['IofQ'][1])[:,Ibeg:Ifin] |
---|
272 | if data['Sample Bkg.']['Name']: |
---|
273 | xydata['IofQ'][1][1] += (xydata['Sample Bkg.'][1][1][Ibeg:Ifin]+ |
---|
274 | data['Sample Bkg.']['Add'])*data['Sample Bkg.']['Mult'] |
---|
275 | if data['Container']['Name']: |
---|
276 | xycontainer = (xydata['Container'][1][1]+data['Container']['Add'])*data['Container']['Mult'] |
---|
277 | if data['Container Bkg.']['Name']: |
---|
278 | xycontainer += (xydata['Container Bkg.'][1][1][Ibeg:Ifin]+ |
---|
279 | data['Container Bkg.']['Add'])*data['Container Bkg.']['Mult'] |
---|
280 | xydata['IofQ'][1][1] += xycontainer |
---|
281 | #get element data & absorption coeff. |
---|
282 | ElList = data['ElList'] |
---|
283 | Abs = G2lat.CellAbsorption(ElList,data['Form Vol']) |
---|
284 | #Apply angle dependent corrections |
---|
285 | Tth = xydata['IofQ'][1][0] |
---|
286 | dt = (Tth[1]-Tth[0]) |
---|
287 | MuR = Abs*data['Diam']/20.0 |
---|
288 | xydata['IofQ'][1][1] /= Absorb(data['Geometry'],MuR,Tth) |
---|
289 | if 'X' in inst['Type'][0]: |
---|
290 | xydata['IofQ'][1][1] /= Polarization(inst['Polariz.'][1],Tth,Azm=inst['Azimuth'][1])[0] |
---|
291 | if data['DetType'] == 'Image plate': |
---|
292 | xydata['IofQ'][1][1] *= Oblique(data['ObliqCoeff'],Tth) |
---|
293 | XY = xydata['IofQ'][1] |
---|
294 | #convert to Q |
---|
295 | if 'C' in inst['Type'][0]: |
---|
296 | hc = 12.397639 |
---|
297 | wave = G2mth.getWave(inst) |
---|
298 | keV = hc/wave |
---|
299 | minQ = npT2q(Tth[0],wave) |
---|
300 | maxQ = npT2q(Tth[-1],wave) |
---|
301 | Qpoints = np.linspace(0.,maxQ,len(XY[0]),endpoint=True) |
---|
302 | dq = Qpoints[1]-Qpoints[0] |
---|
303 | XY[0] = npT2q(XY[0],wave) |
---|
304 | elif 'T' in inst['Type'][0]: |
---|
305 | difC = inst['difC'][1] |
---|
306 | minQ = 2.*np.pi*difC/Tth[-1] |
---|
307 | maxQ = 2.*np.pi*difC/Tth[0] |
---|
308 | Qpoints = np.linspace(0.,maxQ,len(XY[0]),endpoint=True) |
---|
309 | dq = Qpoints[1]-Qpoints[0] |
---|
310 | XY[0] = 2.*np.pi*difC/XY[0] |
---|
311 | Qdata = si.griddata(XY[0],XY[1],Qpoints,method='linear',fill_value=XY[1][0]) |
---|
312 | Qdata -= np.min(Qdata)*data['BackRatio'] |
---|
313 | |
---|
314 | qLimits = data['QScaleLim'] |
---|
315 | minQ = np.searchsorted(Qpoints,qLimits[0]) |
---|
316 | maxQ = np.searchsorted(Qpoints,qLimits[1]) |
---|
317 | newdata = [] |
---|
318 | xydata['IofQ'][1][0] = Qpoints |
---|
319 | xydata['IofQ'][1][1] = Qdata |
---|
320 | for item in xydata['IofQ'][1]: |
---|
321 | newdata.append(item[:maxQ]) |
---|
322 | xydata['IofQ'][1] = newdata |
---|
323 | |
---|
324 | xydata['SofQ'] = copy.deepcopy(xydata['IofQ']) |
---|
325 | FFSq,SqFF,CF = GetAsfMean(ElList,(xydata['SofQ'][1][0]/(4.0*np.pi))**2) #these are <f^2>,<f>^2,Cf |
---|
326 | Q = xydata['SofQ'][1][0] |
---|
327 | # auxPlot.append([Q,np.copy(CF),'CF-unCorr']) |
---|
328 | ruland = Ruland(data['Ruland'],wave,Q,CF) |
---|
329 | # auxPlot.append([Q,ruland,'Ruland']) |
---|
330 | CF *= ruland |
---|
331 | # auxPlot.append([Q,CF,'CF-Corr']) |
---|
332 | scale = np.sum((FFSq+CF)[minQ:maxQ])/np.sum(xydata['SofQ'][1][1][minQ:maxQ]) |
---|
333 | xydata['SofQ'][1][1] *= scale |
---|
334 | xydata['SofQ'][1][1] -= CF |
---|
335 | xydata['SofQ'][1][1] = xydata['SofQ'][1][1]/SqFF |
---|
336 | scale = len(xydata['SofQ'][1][1][minQ:maxQ])/np.sum(xydata['SofQ'][1][1][minQ:maxQ]) |
---|
337 | xydata['SofQ'][1][1] *= scale |
---|
338 | xydata['FofQ'] = copy.deepcopy(xydata['SofQ']) |
---|
339 | xydata['FofQ'][1][1] = xydata['FofQ'][1][0]*(xydata['SofQ'][1][1]-1.0) |
---|
340 | if data['Lorch']: |
---|
341 | xydata['FofQ'][1][1] *= LorchWeight(Q) |
---|
342 | xydata['GofR'] = copy.deepcopy(xydata['FofQ']) |
---|
343 | nR = len(xydata['GofR'][1][1]) |
---|
344 | xydata['GofR'][1][1] = -dq*np.imag(ft.fft(xydata['FofQ'][1][1],4*nR)[:nR]) |
---|
345 | xydata['GofR'][1][0] = 0.5*np.pi*np.linspace(0,nR,nR)/qLimits[1] |
---|
346 | return auxPlot |
---|
347 | |
---|
348 | def MakeRDF(RDFcontrols,background,inst,pwddata): |
---|
349 | import scipy.fftpack as ft |
---|
350 | import scipy.signal as signal |
---|
351 | auxPlot = [] |
---|
352 | if 'C' in inst['Type'][0]: |
---|
353 | Tth = pwddata[0] |
---|
354 | wave = G2mth.getWave(inst) |
---|
355 | minQ = npT2q(Tth[0],wave) |
---|
356 | maxQ = npT2q(Tth[-1],wave) |
---|
357 | powQ = npT2q(Tth,wave) |
---|
358 | elif 'T' in inst['Type'][0]: |
---|
359 | TOF = pwddata[0] |
---|
360 | difC = inst['difC'][1] |
---|
361 | minQ = 2.*np.pi*difC/TOF[-1] |
---|
362 | maxQ = 2.*np.pi*difC/TOF[0] |
---|
363 | powQ = 2.*np.pi*difC/TOF |
---|
364 | piDQ = np.pi/(maxQ-minQ) |
---|
365 | Qpoints = np.linspace(minQ,maxQ,len(pwddata[0]),endpoint=True) |
---|
366 | if RDFcontrols['UseObsCalc']: |
---|
367 | Qdata = si.griddata(powQ,pwddata[1]-pwddata[3],Qpoints,method=RDFcontrols['Smooth'],fill_value=0.) |
---|
368 | else: |
---|
369 | Qdata = si.griddata(powQ,pwddata[1]-pwddata[4],Qpoints,method=RDFcontrols['Smooth'],fill_value=pwddata[1][0]) |
---|
370 | Qdata *= np.sin((Qpoints-minQ)*piDQ)/piDQ |
---|
371 | Qdata *= 0.5*np.sqrt(Qpoints) #Qbin normalization |
---|
372 | # GSASIIpath.IPyBreak() |
---|
373 | dq = Qpoints[1]-Qpoints[0] |
---|
374 | nR = len(Qdata) |
---|
375 | R = 0.5*np.pi*np.linspace(0,nR,nR)/(4.*maxQ) |
---|
376 | iFin = np.searchsorted(R,RDFcontrols['maxR']) |
---|
377 | bBut,aBut = signal.butter(4,0.01) |
---|
378 | Qsmooth = signal.filtfilt(bBut,aBut,Qdata) |
---|
379 | # auxPlot.append([Qpoints,Qdata,'interpolate:'+RDFcontrols['Smooth']]) |
---|
380 | # auxPlot.append([Qpoints,Qsmooth,'interpolate:'+RDFcontrols['Smooth']]) |
---|
381 | DofR = dq*np.imag(ft.fft(Qsmooth,16*nR)[:nR]) |
---|
382 | auxPlot.append([R[:iFin],DofR[:iFin],'D(R)']) |
---|
383 | return auxPlot |
---|
384 | |
---|
385 | ################################################################################ |
---|
386 | #GSASII peak fitting routines: Finger, Cox & Jephcoat model |
---|
387 | ################################################################################ |
---|
388 | |
---|
389 | def factorize(num): |
---|
390 | ''' Provide prime number factors for integer num |
---|
391 | :returns: dictionary of prime factors (keys) & power for each (data) |
---|
392 | ''' |
---|
393 | factors = {} |
---|
394 | orig = num |
---|
395 | |
---|
396 | # we take advantage of the fact that (i +1)**2 = i**2 + 2*i +1 |
---|
397 | i, sqi = 2, 4 |
---|
398 | while sqi <= num: |
---|
399 | while not num%i: |
---|
400 | num /= i |
---|
401 | factors[i] = factors.get(i, 0) + 1 |
---|
402 | |
---|
403 | sqi += 2*i + 1 |
---|
404 | i += 1 |
---|
405 | |
---|
406 | if num != 1 and num != orig: |
---|
407 | factors[num] = factors.get(num, 0) + 1 |
---|
408 | |
---|
409 | if factors: |
---|
410 | return factors |
---|
411 | else: |
---|
412 | return {num:1} #a prime number! |
---|
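# Illustrative results from the algorithm above:
#   factorize(12) -> {2: 2, 3: 1}
#   factorize(13) -> {13: 1}      (a prime returns itself with power 1)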
413 | |
---|
414 | def makeFFTsizeList(nmin=1,nmax=1023,thresh=15): |
---|
415 | ''' Provide list of optimal data sizes for FFT calculations |
---|
416 | |
---|
417 | :param int nmin: minimum data size >= 1 |
---|
418 | :param int nmax: maximum data size > nmin |
---|
419 | :param int thresh: maximum prime factor allowed |
---|
420 | :Returns: list of data sizes where the maximum prime factor is < thresh |
---|
421 | ''' |
---|
422 | plist = [] |
---|
423 | nmin = max(1,nmin) |
---|
424 | nmax = max(nmin+1,nmax) |
---|
425 | for p in range(nmin,nmax): |
---|
426 | if max(factorize(p).keys()) < thresh: |
---|
427 | plist.append(p) |
---|
428 | return plist |
---|
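# Illustrative call: makeFFTsizeList(10,16,7) returns [10, 12, 15] - the sizes in
# [10,16) whose largest prime factor is below 7 (11 and 13 are prime; 14 contains 7).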
429 | |
---|
430 | np.seterr(divide='ignore') |
---|
431 | |
---|
432 | # Normal distribution |
---|
433 | |
---|
434 | # loc = mu, scale = std |
---|
435 | _norm_pdf_C = 1./math.sqrt(2*math.pi) |
---|
436 | class norm_gen(st.rv_continuous): |
---|
437 | 'needs a doc string' |
---|
438 | |
---|
439 | def pdf(self,x,*args,**kwds): |
---|
440 | loc,scale=kwds['loc'],kwds['scale'] |
---|
441 | x = (x-loc)/scale |
---|
442 | return np.exp(-x**2/2.0) * _norm_pdf_C / scale |
---|
443 | |
---|
444 | norm = norm_gen(name='norm',longname='A normal',extradoc=""" |
---|
445 | |
---|
446 | Normal distribution |
---|
447 | |
---|
448 | The location (loc) keyword specifies the mean. |
---|
449 | The scale (scale) keyword specifies the standard deviation. |
---|
450 | |
---|
451 | normal.pdf(x) = exp(-x**2/2)/sqrt(2*pi) |
---|
452 | """) |
---|
453 | |
---|
454 | ## Cauchy |
---|
455 | |
---|
456 | # median = loc |
---|
457 | |
---|
458 | class cauchy_gen(st.rv_continuous): |
---|
459 | 'needs a doc string' |
---|
460 | |
---|
461 | def pdf(self,x,*args,**kwds): |
---|
462 | loc,scale=kwds['loc'],kwds['scale'] |
---|
463 | x = (x-loc)/scale |
---|
464 | return 1.0/np.pi/(1.0+x*x) / scale |
---|
465 | |
---|
466 | cauchy = cauchy_gen(name='cauchy',longname='Cauchy',extradoc=""" |
---|
467 | |
---|
468 | Cauchy distribution |
---|
469 | |
---|
470 | cauchy.pdf(x) = 1/(pi*(1+x**2)) |
---|
471 | |
---|
472 | This is the t distribution with one degree of freedom. |
---|
473 | """) |
---|
474 | |
---|
475 | |
---|
476 | #GSASII peak fitting routine: Finger, Cox & Jephcoat model |
---|
477 | |
---|
478 | |
---|
479 | class fcjde_gen(st.rv_continuous): |
---|
480 | """ |
---|
481 | Finger-Cox-Jephcoat D(2phi,2th) function for S/L = H/L |
---|
482 | Ref: J. Appl. Cryst. (1994) 27, 892-900. |
---|
483 | |
---|
484 | :param x: array -1 to 1 |
---|
485 | :param t: 2-theta position of peak |
---|
486 | :param s: sum(S/L,H/L); S: sample height, H: detector opening, |
---|
487 | L: sample to detector opening distance |
---|
488 | :param dx: 2-theta step size in deg |
---|
489 | |
---|
490 | :returns: for fcj.pdf |
---|
491 | |
---|
492 | * T = x*dx+t |
---|
493 | * s = S/L+H/L |
---|
494 | * if x < 0:: |
---|
495 | |
---|
496 | fcj.pdf = [1/sqrt({cos(T)**2/cos(t)**2}-1) - 1/s]/|cos(T)| |
---|
497 | |
---|
498 | * if x >= 0: fcj.pdf = 0 |
---|
499 | """ |
---|
500 | def _pdf(self,x,t,s,dx): |
---|
501 | T = dx*x+t |
---|
502 | ax2 = abs(npcosd(T)) |
---|
503 | ax = ax2**2 |
---|
504 | bx = npcosd(t)**2 |
---|
505 | bx = np.where(ax>bx,bx,ax) |
---|
506 | fx = np.where(ax>bx,(np.sqrt(bx/(ax-bx))-1./s)/ax2,0.0) |
---|
507 | fx = np.where(fx > 0.,fx,0.0) |
---|
508 | return fx |
---|
509 | |
---|
510 | def pdf(self,x,*args,**kwds): |
---|
511 | loc=kwds['loc'] |
---|
512 | return self._pdf(x-loc,*args) |
---|
513 | |
---|
514 | fcjde = fcjde_gen(name='fcjde',shapes='t,s,dx') |
---|
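# fcjde behaves like the frozen scipy distributions above; its shape parameters are
# passed in the order (t,s,dx). For example, getFCJVoigt below evaluates it as
#   fcjde.pdf(x, pos, shl/57.2958, dx, loc=pos)      # 57.2958 = 180/pi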
515 | |
---|
def getWidthsCW(pos,sig,gam,shl):
    '''Compute the peak widths used for computing the range of a peak
    for constant wavelength data. On the low-angle side 10 FWHM are used and
    that side is extended by the axial divergence term (shl); on the
    high-angle side 15 FWHM are used. For peaks above 90 deg 2-theta the
    two sides are swapped.
    '''
    widths = [np.sqrt(sig)/100.,gam/100.]
    fwhm = 2.355*widths[0]+widths[1]
    fmin = 10.*(fwhm+shl*abs(npcosd(pos)))
    fmax = 15.0*fwhm
    if pos > 90:
        fmin,fmax = [fmax,fmin]
    return widths,fmin,fmax
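# Worked example (illustrative numbers): getWidthsCW(45.,4.,0.5,0.002) gives
# widths = [0.02, 0.005], fwhm = 0.0521, fmin ~ 0.535 and fmax ~ 0.78.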

def getWidthsTOF(pos,alp,bet,sig,gam):
    '''Compute the peak widths used for computing the range of a peak
    for time-of-flight data. 10 FWHM are used on both sides, each side
    extended by the exponential rise (alp) or decay (bet) coefficient.
    '''
    lnf = 3.3      # =log(0.001/2)
    widths = [np.sqrt(sig),gam]
    fwhm = 2.355*widths[0]+2.*widths[1]
    fmin = 10.*fwhm*(1.+1./alp)
    fmax = 10.*fwhm*(1.+1./bet)
    return widths,fmin,fmax
541 | |
---|
542 | def getFWHM(pos,Inst): |
---|
543 | '''Compute total FWHM from Thompson, Cox & Hastings (1987), J. Appl. Cryst. 20, 79-83 |
---|
544 | via getgamFW(g,s). |
---|
545 | |
---|
546 | :param pos: float peak position in deg 2-theta or tof in musec |
---|
547 | :param Inst: dict instrument parameters |
---|
548 | |
---|
549 | :returns float: total FWHM of pseudoVoigt in deg or musec |
---|
550 | ''' |
---|
551 | |
---|
552 | sig = lambda Th,U,V,W: np.sqrt(max(0.001,U*tand(Th)**2+V*tand(Th)+W)) |
---|
553 | sigTOF = lambda dsp,S0,S1,S2,Sq: S0+S1*dsp**2+S2*dsp**4+Sq/dsp**2 |
---|
554 | gam = lambda Th,X,Y: (X/cosd(Th)+Y*tand(Th)) |
---|
555 | gamTOF = lambda dsp,X,Y: X*dsp+Y*dsp**2 |
---|
556 | if 'C' in Inst['Type'][0]: |
---|
557 | s = sig(pos/2.,Inst['U'][1],Inst['V'][1],Inst['W'][1]) |
---|
558 | g = gam(pos/2.,Inst['X'][1],Inst['Y'][1]) |
---|
559 | return getgamFW(g,s)/100. #returns FWHM in deg |
---|
560 | else: |
---|
561 | dsp = pos/Inst['difC'][0] |
---|
562 | s = sigTOF(dsp,Inst['sig-0'][1],Inst['sig-1'][1],Inst['sig-2'][1],Inst['sig-q'][1]) |
---|
563 | g = gamTOF(dsp,Inst['X'][1],Inst['Y'][1]) |
---|
564 | return getgamFW(g,s) |
---|
565 | |
---|
566 | def getgamFW(g,s): |
---|
567 | '''Compute total FWHM from Thompson, Cox & Hastings (1987), J. Appl. Cryst. 20, 79-83 |
---|
568 | lambda fxn needs FWHM for both Gaussian & Lorentzian components |
---|
569 | |
---|
570 | :param g: float Lorentzian gamma = FWHM(L) |
---|
571 | :param s: float Gaussian sig |
---|
572 | |
---|
573 | :returns float: total FWHM of pseudoVoigt |
---|
574 | ''' |
---|
575 | gamFW = lambda s,g: np.exp(np.log(s**5+2.69269*s**4*g+2.42843*s**3*g**2+4.47163*s**2*g**3+0.07842*s*g**4+g**5)/5.) |
---|
576 | return gamFW(2.35482*s,g) #sqrt(8ln2)*sig = FWHM(G) |
---|
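# Limiting cases of the expression above: a pure Gaussian (g = 0) gives
# getgamFW(0,s) = 2.35482*s = sqrt(8*ln2)*s, and a pure Lorentzian (s = 0)
# gives getgamFW(g,0) = g, as expected for the TCH pseudo-Voigt FWHM.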
577 | |
---|
578 | def getFCJVoigt(pos,intens,sig,gam,shl,xdata): |
---|
579 | 'needs a doc string' |
---|
580 | DX = xdata[1]-xdata[0] |
---|
581 | widths,fmin,fmax = getWidthsCW(pos,sig,gam,shl) |
---|
582 | x = np.linspace(pos-fmin,pos+fmin,256) |
---|
583 | dx = x[1]-x[0] |
---|
584 | Norm = norm.pdf(x,loc=pos,scale=widths[0]) |
---|
585 | Cauchy = cauchy.pdf(x,loc=pos,scale=widths[1]) |
---|
586 | arg = [pos,shl/57.2958,dx,] |
---|
587 | FCJ = fcjde.pdf(x,*arg,loc=pos) |
---|
588 | if len(np.nonzero(FCJ)[0])>5: |
---|
589 | z = np.column_stack([Norm,Cauchy,FCJ]).T |
---|
590 | Z = fft(z) |
---|
591 | Df = ifft(Z.prod(axis=0)).real |
---|
592 | else: |
---|
593 | z = np.column_stack([Norm,Cauchy]).T |
---|
594 | Z = fft(z) |
---|
595 | Df = fftshift(ifft(Z.prod(axis=0))).real |
---|
596 | Df /= np.sum(Df) |
---|
597 | Df = si.interp1d(x,Df,bounds_error=False,fill_value=0.0) |
---|
598 | return intens*Df(xdata)*DX/dx |
---|
599 | |
---|
600 | def getBackground(pfx,parmDict,bakType,dataType,xdata): |
---|
601 | 'needs a doc string' |
---|
602 | if 'T' in dataType: |
---|
603 | q = 2.*np.pi*parmDict[pfx+'difC']/xdata |
---|
604 | elif 'C' in dataType: |
---|
605 | wave = parmDict.get(pfx+'Lam',parmDict.get(pfx+'Lam1',1.0)) |
---|
606 | q = npT2q(xdata,wave) |
---|
607 | yb = np.zeros_like(xdata) |
---|
608 | nBak = 0 |
---|
609 | cw = np.diff(xdata) |
---|
610 | cw = np.append(cw,cw[-1]) |
---|
611 | sumBk = [0.,0.,0] |
---|
612 | while True: |
---|
613 | key = pfx+'Back;'+str(nBak) |
---|
614 | if key in parmDict: |
---|
615 | nBak += 1 |
---|
616 | else: |
---|
617 | break |
---|
618 | #empirical functions |
---|
619 | if bakType in ['chebyschev','cosine']: |
---|
620 | dt = xdata[-1]-xdata[0] |
---|
621 | for iBak in range(nBak): |
---|
622 | key = pfx+'Back;'+str(iBak) |
---|
623 | if bakType == 'chebyschev': |
---|
624 | ybi = parmDict[key]*(-1.+2.*(xdata-xdata[0])/dt)**iBak |
---|
625 | elif bakType == 'cosine': |
---|
626 | ybi = parmDict[key]*npcosd(180.*xdata*iBak/xdata[-1]) |
---|
627 | yb += ybi |
---|
628 | sumBk[0] = np.sum(yb) |
---|
629 | elif bakType in ['Q^2 power series','Q^-2 power series']: |
---|
630 | QT = 1. |
---|
631 | yb += np.ones_like(yb)*parmDict[pfx+'Back;0'] |
---|
632 | for iBak in range(nBak-1): |
---|
633 | key = pfx+'Back;'+str(iBak+1) |
---|
634 | if '-2' in bakType: |
---|
635 | QT *= (iBak+1)*q**-2 |
---|
636 | else: |
---|
637 | QT *= q**2/(iBak+1) |
---|
638 | yb += QT*parmDict[key] |
---|
639 | sumBk[0] = np.sum(yb) |
---|
640 | elif bakType in ['lin interpolate','inv interpolate','log interpolate',]: |
---|
641 | if nBak == 1: |
---|
642 | yb = np.ones_like(xdata)*parmDict[pfx+'Back;0'] |
---|
643 | elif nBak == 2: |
---|
644 | dX = xdata[-1]-xdata[0] |
---|
645 | T2 = (xdata-xdata[0])/dX |
---|
646 | T1 = 1.0-T2 |
---|
647 | yb = parmDict[pfx+'Back;0']*T1+parmDict[pfx+'Back;1']*T2 |
---|
648 | else: |
---|
649 | if bakType == 'lin interpolate': |
---|
650 | bakPos = np.linspace(xdata[0],xdata[-1],nBak,True) |
---|
651 | elif bakType == 'inv interpolate': |
---|
652 | bakPos = 1./np.linspace(1./xdata[-1],1./xdata[0],nBak,True) |
---|
653 | elif bakType == 'log interpolate': |
---|
654 | bakPos = np.exp(np.linspace(np.log(xdata[0]),np.log(xdata[-1]),nBak,True)) |
---|
655 | bakPos[0] = xdata[0] |
---|
656 | bakPos[-1] = xdata[-1] |
---|
657 | bakVals = np.zeros(nBak) |
---|
658 | for i in range(nBak): |
---|
659 | bakVals[i] = parmDict[pfx+'Back;'+str(i)] |
---|
660 | bakInt = si.interp1d(bakPos,bakVals,'linear') |
---|
661 | yb = bakInt(ma.getdata(xdata)) |
---|
662 | sumBk[0] = np.sum(yb) |
---|
663 | #Debye function |
---|
664 | if pfx+'difC' in parmDict: |
---|
665 | ff = 1. |
---|
666 | else: |
---|
667 | try: |
---|
668 | wave = parmDict[pfx+'Lam'] |
---|
669 | except KeyError: |
---|
670 | wave = parmDict[pfx+'Lam1'] |
---|
671 | SQ = (q/(4.*np.pi))**2 |
---|
672 | FF = G2elem.GetFormFactorCoeff('Si')[0] |
---|
673 | ff = np.array(G2elem.ScatFac(FF,SQ)[0])**2 |
---|
674 | iD = 0 |
---|
675 | while True: |
---|
676 | try: |
---|
677 | dbA = parmDict[pfx+'DebyeA;'+str(iD)] |
---|
678 | dbR = parmDict[pfx+'DebyeR;'+str(iD)] |
---|
679 | dbU = parmDict[pfx+'DebyeU;'+str(iD)] |
---|
680 | ybi = ff*dbA*np.sin(q*dbR)*np.exp(-dbU*q**2)/(q*dbR) |
---|
681 | yb += ybi |
---|
682 | sumBk[1] += np.sum(ybi) |
---|
683 | iD += 1 |
---|
684 | except KeyError: |
---|
685 | break |
---|
686 | #peaks |
---|
687 | iD = 0 |
---|
688 | while True: |
---|
689 | try: |
---|
690 | pkP = parmDict[pfx+'BkPkpos;'+str(iD)] |
---|
691 | pkI = parmDict[pfx+'BkPkint;'+str(iD)] |
---|
692 | pkS = parmDict[pfx+'BkPksig;'+str(iD)] |
---|
693 | pkG = parmDict[pfx+'BkPkgam;'+str(iD)] |
---|
694 | if 'C' in dataType: |
---|
695 | Wd,fmin,fmax = getWidthsCW(pkP,pkS,pkG,.002) |
---|
696 | else: #'T'OF |
---|
697 | Wd,fmin,fmax = getWidthsTOF(pkP,1.,1.,pkS,pkG) |
---|
698 | iBeg = np.searchsorted(xdata,pkP-fmin) |
---|
699 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
700 | lenX = len(xdata) |
---|
701 | if not iBeg: |
---|
702 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
703 | elif iBeg == lenX: |
---|
704 | iFin = iBeg |
---|
705 | else: |
---|
706 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
707 | if 'C' in dataType: |
---|
708 | ybi = pkI*getFCJVoigt3(pkP,pkS,pkG,0.002,xdata[iBeg:iFin]) |
---|
709 | yb[iBeg:iFin] += ybi |
---|
710 | else: #'T'OF |
---|
711 | ybi = pkI*getEpsVoigt(pkP,1.,1.,pkS,pkG,xdata[iBeg:iFin]) |
---|
712 | yb[iBeg:iFin] += ybi |
---|
713 | sumBk[2] += np.sum(ybi) |
---|
714 | iD += 1 |
---|
715 | except KeyError: |
---|
716 | break |
---|
717 | except ValueError: |
---|
            print '**** WARNING - background peak '+str(iD)+' sigma is negative; fix & try again ****'
719 | break |
---|
720 | return yb,sumBk |
---|
721 | |
---|
722 | def getBackgroundDerv(hfx,parmDict,bakType,dataType,xdata): |
---|
723 | 'needs a doc string' |
---|
724 | if 'T' in dataType: |
---|
725 | q = 2.*np.pi*parmDict[hfx+'difC']/xdata |
---|
726 | elif 'C' in dataType: |
---|
727 | wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0)) |
---|
728 | q = 2.*np.pi*npsind(xdata/2.)/wave |
---|
729 | nBak = 0 |
---|
730 | while True: |
---|
731 | key = hfx+'Back;'+str(nBak) |
---|
732 | if key in parmDict: |
---|
733 | nBak += 1 |
---|
734 | else: |
---|
735 | break |
---|
736 | dydb = np.zeros(shape=(nBak,len(xdata))) |
---|
737 | dyddb = np.zeros(shape=(3*parmDict[hfx+'nDebye'],len(xdata))) |
---|
738 | dydpk = np.zeros(shape=(4*parmDict[hfx+'nPeaks'],len(xdata))) |
---|
739 | cw = np.diff(xdata) |
---|
740 | cw = np.append(cw,cw[-1]) |
---|
741 | |
---|
742 | if bakType in ['chebyschev','cosine']: |
---|
743 | dt = xdata[-1]-xdata[0] |
---|
744 | for iBak in range(nBak): |
---|
745 | if bakType == 'chebyschev': |
---|
746 | dydb[iBak] = (-1.+2.*(xdata-xdata[0])/dt)**iBak |
---|
747 | elif bakType == 'cosine': |
---|
748 | dydb[iBak] = npcosd(180.*xdata*iBak/xdata[-1]) |
---|
749 | elif bakType in ['Q^2 power series','Q^-2 power series']: |
---|
750 | QT = 1. |
---|
751 | dydb[0] = np.ones_like(xdata) |
---|
752 | for iBak in range(nBak-1): |
---|
753 | if '-2' in bakType: |
---|
754 | QT *= (iBak+1)*q**-2 |
---|
755 | else: |
---|
756 | QT *= q**2/(iBak+1) |
---|
757 | dydb[iBak+1] = QT |
---|
758 | elif bakType in ['lin interpolate','inv interpolate','log interpolate',]: |
---|
759 | if nBak == 1: |
---|
760 | dydb[0] = np.ones_like(xdata) |
---|
761 | elif nBak == 2: |
---|
762 | dX = xdata[-1]-xdata[0] |
---|
763 | T2 = (xdata-xdata[0])/dX |
---|
764 | T1 = 1.0-T2 |
---|
765 | dydb = [T1,T2] |
---|
766 | else: |
---|
767 | if bakType == 'lin interpolate': |
---|
768 | bakPos = np.linspace(xdata[0],xdata[-1],nBak,True) |
---|
769 | elif bakType == 'inv interpolate': |
---|
770 | bakPos = 1./np.linspace(1./xdata[-1],1./xdata[0],nBak,True) |
---|
771 | elif bakType == 'log interpolate': |
---|
772 | bakPos = np.exp(np.linspace(np.log(xdata[0]),np.log(xdata[-1]),nBak,True)) |
---|
773 | bakPos[0] = xdata[0] |
---|
774 | bakPos[-1] = xdata[-1] |
---|
775 | for i,pos in enumerate(bakPos): |
---|
776 | if i == 0: |
---|
777 | dydb[0] = np.where(xdata<bakPos[1],(bakPos[1]-xdata)/(bakPos[1]-bakPos[0]),0.) |
---|
778 | elif i == len(bakPos)-1: |
---|
779 | dydb[i] = np.where(xdata>bakPos[-2],(bakPos[-1]-xdata)/(bakPos[-1]-bakPos[-2]),0.) |
---|
780 | else: |
---|
781 | dydb[i] = np.where(xdata>bakPos[i], |
---|
782 | np.where(xdata<bakPos[i+1],(bakPos[i+1]-xdata)/(bakPos[i+1]-bakPos[i]),0.), |
---|
783 | np.where(xdata>bakPos[i-1],(xdata-bakPos[i-1])/(bakPos[i]-bakPos[i-1]),0.)) |
---|
784 | if hfx+'difC' in parmDict: |
---|
785 | ff = 1. |
---|
786 | else: |
---|
787 | wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0)) |
---|
788 | q = npT2q(xdata,wave) |
---|
789 | SQ = (q/(4*np.pi))**2 |
---|
790 | FF = G2elem.GetFormFactorCoeff('Si')[0] |
---|
791 | ff = np.array(G2elem.ScatFac(FF,SQ)[0])*np.pi**2 #needs pi^2~10. for cw data (why?) |
---|
792 | iD = 0 |
---|
793 | while True: |
---|
794 | try: |
---|
795 | if hfx+'difC' in parmDict: |
---|
796 | q = 2*np.pi*parmDict[hfx+'difC']/xdata |
---|
797 | dbA = parmDict[hfx+'DebyeA;'+str(iD)] |
---|
798 | dbR = parmDict[hfx+'DebyeR;'+str(iD)] |
---|
799 | dbU = parmDict[hfx+'DebyeU;'+str(iD)] |
---|
800 | sqr = np.sin(q*dbR)/(q*dbR) |
---|
801 | cqr = np.cos(q*dbR) |
---|
802 | temp = np.exp(-dbU*q**2) |
---|
803 | dyddb[3*iD] = ff*sqr*temp |
---|
804 | dyddb[3*iD+1] = ff*dbA*temp*(cqr-sqr)/(dbR) |
---|
805 | dyddb[3*iD+2] = -ff*dbA*sqr*temp*q**2 |
---|
806 | iD += 1 |
---|
807 | except KeyError: |
---|
808 | break |
---|
809 | iD = 0 |
---|
810 | while True: |
---|
811 | try: |
---|
812 | pkP = parmDict[hfx+'BkPkpos;'+str(iD)] |
---|
813 | pkI = parmDict[hfx+'BkPkint;'+str(iD)] |
---|
814 | pkS = parmDict[hfx+'BkPksig;'+str(iD)] |
---|
815 | pkG = parmDict[hfx+'BkPkgam;'+str(iD)] |
---|
816 | if 'C' in dataType: |
---|
817 | Wd,fmin,fmax = getWidthsCW(pkP,pkS,pkG,.002) |
---|
818 | else: #'T'OF |
---|
819 | Wd,fmin,fmax = getWidthsTOF(pkP,1.,1.,pkS,pkG) |
---|
820 | iBeg = np.searchsorted(xdata,pkP-fmin) |
---|
821 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
822 | lenX = len(xdata) |
---|
823 | if not iBeg: |
---|
824 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
825 | elif iBeg == lenX: |
---|
826 | iFin = iBeg |
---|
827 | else: |
---|
828 | iFin = np.searchsorted(xdata,pkP+fmax) |
---|
829 | if 'C' in dataType: |
---|
830 | Df,dFdp,dFds,dFdg,x = getdFCJVoigt3(pkP,pkS,pkG,.002,xdata[iBeg:iFin]) |
---|
831 | dydpk[4*iD][iBeg:iFin] += 100.*cw[iBeg:iFin]*pkI*dFdp |
---|
832 | dydpk[4*iD+1][iBeg:iFin] += 100.*cw[iBeg:iFin]*Df |
---|
833 | dydpk[4*iD+2][iBeg:iFin] += 100.*cw[iBeg:iFin]*pkI*dFds |
---|
834 | dydpk[4*iD+3][iBeg:iFin] += 100.*cw[iBeg:iFin]*pkI*dFdg |
---|
835 | else: #'T'OF |
---|
836 | Df,dFdp,x,x,dFds,dFdg = getdEpsVoigt(pkP,1.,1.,pkS,pkG,xdata[iBeg:iFin]) |
---|
837 | dydpk[4*iD][iBeg:iFin] += pkI*dFdp |
---|
838 | dydpk[4*iD+1][iBeg:iFin] += Df |
---|
839 | dydpk[4*iD+2][iBeg:iFin] += pkI*dFds |
---|
840 | dydpk[4*iD+3][iBeg:iFin] += pkI*dFdg |
---|
841 | iD += 1 |
---|
842 | except KeyError: |
---|
843 | break |
---|
844 | except ValueError: |
---|
            print '**** WARNING - background peak '+str(iD)+' sigma is negative; fix & try again ****'
846 | break |
---|
847 | return dydb,dyddb,dydpk |
---|
848 | |
---|
849 | #use old fortran routine |
---|
850 | def getFCJVoigt3(pos,sig,gam,shl,xdata): |
---|
851 | 'needs a doc string' |
---|
852 | |
---|
853 | Df = pyd.pypsvfcj(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
854 | # Df = pyd.pypsvfcjo(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
855 | Df /= np.sum(Df) |
---|
856 | return Df |
---|
857 | |
---|
858 | def getdFCJVoigt3(pos,sig,gam,shl,xdata): |
---|
859 | 'needs a doc string' |
---|
860 | |
---|
861 | Df,dFdp,dFds,dFdg,dFdsh = pyd.pydpsvfcj(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
862 | # Df,dFdp,dFds,dFdg,dFdsh = pyd.pydpsvfcjo(len(xdata),xdata-pos,pos,sig,gam,shl) |
---|
863 | sumDf = np.sum(Df) |
---|
864 | return Df,dFdp,dFds,dFdg,dFdsh |
---|
865 | |
---|
866 | def getPsVoigt(pos,sig,gam,xdata): |
---|
867 | 'needs a doc string' |
---|
868 | |
---|
869 | Df = pyd.pypsvoigt(len(xdata),xdata-pos,sig,gam) |
---|
870 | Df /= np.sum(Df) |
---|
871 | return Df |
---|
872 | |
---|
873 | def getdPsVoigt(pos,sig,gam,xdata): |
---|
874 | 'needs a doc string' |
---|
875 | |
---|
876 | Df,dFdp,dFds,dFdg = pyd.pydpsvoigt(len(xdata),xdata-pos,sig,gam) |
---|
877 | sumDf = np.sum(Df) |
---|
878 | return Df,dFdp,dFds,dFdg |
---|
879 | |
---|
880 | def getEpsVoigt(pos,alp,bet,sig,gam,xdata): |
---|
881 | 'needs a doc string' |
---|
882 | Df = pyd.pyepsvoigt(len(xdata),xdata-pos,alp,bet,sig,gam) |
---|
883 | Df /= np.sum(Df) |
---|
884 | return Df |
---|
885 | |
---|
886 | def getdEpsVoigt(pos,alp,bet,sig,gam,xdata): |
---|
887 | 'needs a doc string' |
---|
888 | Df,dFdp,dFda,dFdb,dFds,dFdg = pyd.pydepsvoigt(len(xdata),xdata-pos,alp,bet,sig,gam) |
---|
889 | sumDf = np.sum(Df) |
---|
890 | return Df,dFdp,dFda,dFdb,dFds,dFdg |
---|
891 | |
---|
892 | def ellipseSize(H,Sij,GB): |
---|
893 | 'needs a doc string' |
---|
894 | HX = np.inner(H.T,GB) |
---|
895 | lenHX = np.sqrt(np.sum(HX**2)) |
---|
896 | Esize,Rsize = nl.eigh(G2lat.U6toUij(Sij)) |
---|
897 | R = np.inner(HX/lenHX,Rsize)*Esize #want column length for hkl in crystal |
---|
898 | lenR = np.sqrt(np.sum(R**2)) |
---|
899 | return lenR |
---|
900 | |
---|
901 | def ellipseSizeDerv(H,Sij,GB): |
---|
902 | 'needs a doc string' |
---|
903 | lenR = ellipseSize(H,Sij,GB) |
---|
904 | delt = 0.001 |
---|
905 | dRdS = np.zeros(6) |
---|
906 | for i in range(6): |
---|
907 | Sij[i] -= delt |
---|
908 | lenM = ellipseSize(H,Sij,GB) |
---|
909 | Sij[i] += 2.*delt |
---|
910 | lenP = ellipseSize(H,Sij,GB) |
---|
911 | Sij[i] -= delt |
---|
912 | dRdS[i] = (lenP-lenM)/(2.*delt) |
---|
913 | return lenR,dRdS |
---|
914 | |
---|
915 | def getHKLpeak(dmin,SGData,A,Inst=None): |
---|
916 | 'needs a doc string' |
---|
917 | HKL = G2lat.GenHLaue(dmin,SGData,A) |
---|
918 | HKLs = [] |
---|
919 | for h,k,l,d in HKL: |
---|
920 | ext = G2spc.GenHKLf([h,k,l],SGData)[0] |
---|
921 | if not ext: |
---|
922 | if Inst == None: |
---|
923 | HKLs.append([h,k,l,d,0,-1]) |
---|
924 | else: |
---|
925 | HKLs.append([h,k,l,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
926 | return HKLs |
---|
927 | |
---|
928 | def getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A): |
---|
929 | 'needs a doc string' |
---|
930 | HKLs = [] |
---|
931 | vec = np.array(Vec) |
---|
932 | vstar = np.sqrt(G2lat.calc_rDsq(vec,A)) #find extra needed for -n SS reflections |
---|
933 | dvec = 1./(maxH*vstar+1./dmin) |
---|
934 | HKL = G2lat.GenHLaue(dvec,SGData,A) |
---|
935 | SSdH = [vec*h for h in range(-maxH,maxH+1)] |
---|
936 | SSdH = dict(zip(range(-maxH,maxH+1),SSdH)) |
---|
937 | for h,k,l,d in HKL: |
---|
938 | ext = G2spc.GenHKLf([h,k,l],SGData)[0] |
---|
939 | if not ext and d >= dmin: |
---|
940 | HKLs.append([h,k,l,0,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
941 | for dH in SSdH: |
---|
942 | if dH: |
---|
943 | DH = SSdH[dH] |
---|
944 | H = [h+DH[0],k+DH[1],l+DH[2]] |
---|
945 | d = float(1/np.sqrt(G2lat.calc_rDsq(H,A))) |
---|
946 | if d >= dmin: |
---|
947 | HKLM = np.array([h,k,l,dH]) |
---|
948 | if G2spc.checkSSextc(HKLM,SSGData): |
---|
949 | HKLs.append([h,k,l,dH,d,G2lat.Dsp2pos(Inst,d),-1]) |
---|
950 | # GSASIIpath.IPyBreak() |
---|
951 | return G2lat.sortHKLd(HKLs,True,True,True) |
---|
952 | |
---|
953 | def getPeakProfile(dataType,parmDict,xdata,varyList,bakType): |
---|
954 | 'needs a doc string' |
---|
955 | |
---|
956 | yb = getBackground('',parmDict,bakType,dataType,xdata)[0] |
---|
957 | yc = np.zeros_like(yb) |
---|
958 | cw = np.diff(xdata) |
---|
959 | cw = np.append(cw,cw[-1]) |
---|
960 | if 'C' in dataType: |
---|
961 | shl = max(parmDict['SH/L'],0.002) |
---|
962 | Ka2 = False |
---|
963 | if 'Lam1' in parmDict.keys(): |
---|
964 | Ka2 = True |
---|
965 | lamRatio = 360*(parmDict['Lam2']-parmDict['Lam1'])/(np.pi*parmDict['Lam1']) |
---|
966 | kRatio = parmDict['I(L2)/I(L1)'] |
---|
967 | iPeak = 0 |
---|
968 | while True: |
---|
969 | try: |
---|
970 | pos = parmDict['pos'+str(iPeak)] |
---|
971 | tth = (pos-parmDict['Zero']) |
---|
972 | intens = parmDict['int'+str(iPeak)] |
---|
973 | sigName = 'sig'+str(iPeak) |
---|
974 | if sigName in varyList: |
---|
975 | sig = parmDict[sigName] |
---|
976 | else: |
---|
977 | sig = G2mth.getCWsig(parmDict,tth) |
---|
978 | sig = max(sig,0.001) #avoid neg sigma^2 |
---|
979 | gamName = 'gam'+str(iPeak) |
---|
980 | if gamName in varyList: |
---|
981 | gam = parmDict[gamName] |
---|
982 | else: |
---|
983 | gam = G2mth.getCWgam(parmDict,tth) |
---|
984 | gam = max(gam,0.001) #avoid neg gamma |
---|
985 | Wd,fmin,fmax = getWidthsCW(pos,sig,gam,shl) |
---|
986 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
987 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
988 | if not iBeg+iFin: #peak below low limit |
---|
989 | iPeak += 1 |
---|
990 | continue |
---|
991 | elif not iBeg-iFin: #peak above high limit |
---|
992 | return yb+yc |
---|
993 | yc[iBeg:iFin] += intens*getFCJVoigt3(pos,sig,gam,shl,xdata[iBeg:iFin]) |
---|
994 | if Ka2: |
---|
995 | pos2 = pos+lamRatio*tand(pos/2.0) # + 360/pi * Dlam/lam * tan(th) |
---|
996 | iBeg = np.searchsorted(xdata,pos2-fmin) |
---|
997 | iFin = np.searchsorted(xdata,pos2+fmin) |
---|
998 | if iBeg-iFin: |
---|
999 | yc[iBeg:iFin] += intens*kRatio*getFCJVoigt3(pos2,sig,gam,shl,xdata[iBeg:iFin]) |
---|
1000 | iPeak += 1 |
---|
1001 | except KeyError: #no more peaks to process |
---|
1002 | return yb+yc |
---|
1003 | else: |
---|
1004 | Pdabc = parmDict['Pdabc'] |
---|
1005 | difC = parmDict['difC'] |
---|
1006 | iPeak = 0 |
---|
1007 | while True: |
---|
1008 | try: |
---|
1009 | pos = parmDict['pos'+str(iPeak)] |
---|
1010 | tof = pos-parmDict['Zero'] |
---|
1011 | dsp = tof/difC |
---|
1012 | intens = parmDict['int'+str(iPeak)] |
---|
1013 | alpName = 'alp'+str(iPeak) |
---|
1014 | if alpName in varyList: |
---|
1015 | alp = parmDict[alpName] |
---|
1016 | else: |
---|
1017 | if len(Pdabc): |
---|
1018 | alp = np.interp(dsp,Pdabc[0],Pdabc[1]) |
---|
1019 | else: |
---|
1020 | alp = G2mth.getTOFalpha(parmDict,dsp) |
---|
1021 | alp = max(0.0001,alp) |
---|
1022 | betName = 'bet'+str(iPeak) |
---|
1023 | if betName in varyList: |
---|
1024 | bet = parmDict[betName] |
---|
1025 | else: |
---|
1026 | if len(Pdabc): |
---|
1027 | bet = np.interp(dsp,Pdabc[0],Pdabc[2]) |
---|
1028 | else: |
---|
1029 | bet = G2mth.getTOFbeta(parmDict,dsp) |
---|
1030 | bet = max(0.0001,bet) |
---|
1031 | sigName = 'sig'+str(iPeak) |
---|
1032 | if sigName in varyList: |
---|
1033 | sig = parmDict[sigName] |
---|
1034 | else: |
---|
1035 | sig = G2mth.getTOFsig(parmDict,dsp) |
---|
1036 | gamName = 'gam'+str(iPeak) |
---|
1037 | if gamName in varyList: |
---|
1038 | gam = parmDict[gamName] |
---|
1039 | else: |
---|
1040 | gam = G2mth.getTOFgamma(parmDict,dsp) |
---|
1041 | gam = max(gam,0.001) #avoid neg gamma |
---|
1042 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig,gam) |
---|
1043 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1044 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1045 | lenX = len(xdata) |
---|
1046 | if not iBeg: |
---|
1047 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1048 | elif iBeg == lenX: |
---|
1049 | iFin = iBeg |
---|
1050 | else: |
---|
1051 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1052 | if not iBeg+iFin: #peak below low limit |
---|
1053 | iPeak += 1 |
---|
1054 | continue |
---|
1055 | elif not iBeg-iFin: #peak above high limit |
---|
1056 | return yb+yc |
---|
1057 | yc[iBeg:iFin] += intens*getEpsVoigt(pos,alp,bet,sig,gam,xdata[iBeg:iFin]) |
---|
1058 | iPeak += 1 |
---|
1059 | except KeyError: #no more peaks to process |
---|
1060 | return yb+yc |
---|
1061 | |
---|
1062 | def getPeakProfileDerv(dataType,parmDict,xdata,varyList,bakType): |
---|
1063 | 'needs a doc string' |
---|
1064 | # needs to return np.array([dMdx1,dMdx2,...]) in same order as varylist = backVary,insVary,peakVary order |
---|
1065 | dMdv = np.zeros(shape=(len(varyList),len(xdata))) |
---|
1066 | dMdb,dMddb,dMdpk = getBackgroundDerv('',parmDict,bakType,dataType,xdata) |
---|
1067 | if 'Back;0' in varyList: #background derivs are in front if present |
---|
1068 | dMdv[0:len(dMdb)] = dMdb |
---|
1069 | names = ['DebyeA','DebyeR','DebyeU'] |
---|
1070 | for name in varyList: |
---|
1071 | if 'Debye' in name: |
---|
1072 | parm,id = name.split(';') |
---|
1073 | ip = names.index(parm) |
---|
1074 | dMdv[varyList.index(name)] = dMddb[3*int(id)+ip] |
---|
1075 | names = ['BkPkpos','BkPkint','BkPksig','BkPkgam'] |
---|
1076 | for name in varyList: |
---|
1077 | if 'BkPk' in name: |
---|
1078 | parm,id = name.split(';') |
---|
1079 | ip = names.index(parm) |
---|
1080 | dMdv[varyList.index(name)] = dMdpk[4*int(id)+ip] |
---|
1081 | cw = np.diff(xdata) |
---|
1082 | cw = np.append(cw,cw[-1]) |
---|
1083 | if 'C' in dataType: |
---|
1084 | shl = max(parmDict['SH/L'],0.002) |
---|
1085 | Ka2 = False |
---|
1086 | if 'Lam1' in parmDict.keys(): |
---|
1087 | Ka2 = True |
---|
1088 | lamRatio = 360*(parmDict['Lam2']-parmDict['Lam1'])/(np.pi*parmDict['Lam1']) |
---|
1089 | kRatio = parmDict['I(L2)/I(L1)'] |
---|
1090 | iPeak = 0 |
---|
1091 | while True: |
---|
1092 | try: |
---|
1093 | pos = parmDict['pos'+str(iPeak)] |
---|
1094 | tth = (pos-parmDict['Zero']) |
---|
1095 | intens = parmDict['int'+str(iPeak)] |
---|
1096 | sigName = 'sig'+str(iPeak) |
---|
1097 | if sigName in varyList: |
---|
1098 | sig = parmDict[sigName] |
---|
1099 | dsdU = dsdV = dsdW = 0 |
---|
1100 | else: |
---|
1101 | sig = G2mth.getCWsig(parmDict,tth) |
---|
1102 | dsdU,dsdV,dsdW = G2mth.getCWsigDeriv(tth) |
---|
1103 | sig = max(sig,0.001) #avoid neg sigma |
---|
1104 | gamName = 'gam'+str(iPeak) |
---|
1105 | if gamName in varyList: |
---|
1106 | gam = parmDict[gamName] |
---|
1107 | dgdX = dgdY = 0 |
---|
1108 | else: |
---|
1109 | gam = G2mth.getCWgam(parmDict,tth) |
---|
1110 | dgdX,dgdY = G2mth.getCWgamDeriv(tth) |
---|
1111 | gam = max(gam,0.001) #avoid neg gamma |
---|
1112 | Wd,fmin,fmax = getWidthsCW(pos,sig,gam,shl) |
---|
1113 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1114 | iFin = np.searchsorted(xdata,pos+fmin) |
---|
1115 | if not iBeg+iFin: #peak below low limit |
---|
1116 | iPeak += 1 |
---|
1117 | continue |
---|
1118 | elif not iBeg-iFin: #peak above high limit |
---|
1119 | break |
---|
1120 | dMdpk = np.zeros(shape=(6,len(xdata))) |
---|
1121 | dMdipk = getdFCJVoigt3(pos,sig,gam,shl,xdata[iBeg:iFin]) |
---|
1122 | for i in range(1,5): |
---|
1123 | dMdpk[i][iBeg:iFin] += 100.*cw[iBeg:iFin]*intens*dMdipk[i] |
---|
1124 | dMdpk[0][iBeg:iFin] += 100.*cw[iBeg:iFin]*dMdipk[0] |
---|
1125 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4]} |
---|
1126 | if Ka2: |
---|
1127 | pos2 = pos+lamRatio*tand(pos/2.0) # + 360/pi * Dlam/lam * tan(th) |
---|
1128 | iBeg = np.searchsorted(xdata,pos2-fmin) |
---|
1129 | iFin = np.searchsorted(xdata,pos2+fmin) |
---|
1130 | if iBeg-iFin: |
---|
1131 | dMdipk2 = getdFCJVoigt3(pos2,sig,gam,shl,xdata[iBeg:iFin]) |
---|
1132 | for i in range(1,5): |
---|
1133 | dMdpk[i][iBeg:iFin] += 100.*cw[iBeg:iFin]*intens*kRatio*dMdipk2[i] |
---|
1134 | dMdpk[0][iBeg:iFin] += 100.*cw[iBeg:iFin]*kRatio*dMdipk2[0] |
---|
1135 | dMdpk[5][iBeg:iFin] += 100.*cw[iBeg:iFin]*dMdipk2[0] |
---|
1136 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4],'L1/L2':dMdpk[5]*intens} |
---|
1137 | for parmName in ['pos','int','sig','gam']: |
---|
1138 | try: |
---|
1139 | idx = varyList.index(parmName+str(iPeak)) |
---|
1140 | dMdv[idx] = dervDict[parmName] |
---|
1141 | except ValueError: |
---|
1142 | pass |
---|
1143 | if 'U' in varyList: |
---|
1144 | dMdv[varyList.index('U')] += dsdU*dervDict['sig'] |
---|
1145 | if 'V' in varyList: |
---|
1146 | dMdv[varyList.index('V')] += dsdV*dervDict['sig'] |
---|
1147 | if 'W' in varyList: |
---|
1148 | dMdv[varyList.index('W')] += dsdW*dervDict['sig'] |
---|
1149 | if 'X' in varyList: |
---|
1150 | dMdv[varyList.index('X')] += dgdX*dervDict['gam'] |
---|
1151 | if 'Y' in varyList: |
---|
1152 | dMdv[varyList.index('Y')] += dgdY*dervDict['gam'] |
---|
1153 | if 'SH/L' in varyList: |
---|
1154 | dMdv[varyList.index('SH/L')] += dervDict['shl'] #problem here |
---|
1155 | if 'I(L2)/I(L1)' in varyList: |
---|
1156 | dMdv[varyList.index('I(L2)/I(L1)')] += dervDict['L1/L2'] |
---|
1157 | iPeak += 1 |
---|
1158 | except KeyError: #no more peaks to process |
---|
1159 | break |
---|
1160 | else: |
---|
1161 | Pdabc = parmDict['Pdabc'] |
---|
1162 | difC = parmDict['difC'] |
---|
1163 | iPeak = 0 |
---|
1164 | while True: |
---|
1165 | try: |
---|
1166 | pos = parmDict['pos'+str(iPeak)] |
---|
1167 | tof = pos-parmDict['Zero'] |
---|
1168 | dsp = tof/difC |
---|
1169 | intens = parmDict['int'+str(iPeak)] |
---|
1170 | alpName = 'alp'+str(iPeak) |
---|
1171 | if alpName in varyList: |
---|
1172 | alp = parmDict[alpName] |
---|
1173 | else: |
---|
1174 | if len(Pdabc): |
---|
1175 | alp = np.interp(dsp,Pdabc[0],Pdabc[1]) |
---|
1176 | dad0 = 0 |
---|
1177 | else: |
---|
1178 | alp = G2mth.getTOFalpha(parmDict,dsp) |
---|
1179 | dada0 = G2mth.getTOFalphaDeriv(dsp) |
---|
1180 | betName = 'bet'+str(iPeak) |
---|
1181 | if betName in varyList: |
---|
1182 | bet = parmDict[betName] |
---|
1183 | else: |
---|
1184 | if len(Pdabc): |
---|
1185 | bet = np.interp(dsp,Pdabc[0],Pdabc[2]) |
---|
1186 | dbdb0 = dbdb1 = dbdb2 = 0 |
---|
1187 | else: |
---|
1188 | bet = G2mth.getTOFbeta(parmDict,dsp) |
---|
1189 | dbdb0,dbdb1,dbdb2 = G2mth.getTOFbetaDeriv(dsp) |
---|
1190 | sigName = 'sig'+str(iPeak) |
---|
1191 | if sigName in varyList: |
---|
1192 | sig = parmDict[sigName] |
---|
1193 | dsds0 = dsds1 = dsds2 = dsds3 = 0 |
---|
1194 | else: |
---|
1195 | sig = G2mth.getTOFsig(parmDict,dsp) |
---|
1196 | dsds0,dsds1,dsds2,dsds3 = G2mth.getTOFsigDeriv(dsp) |
---|
1197 | gamName = 'gam'+str(iPeak) |
---|
1198 | if gamName in varyList: |
---|
1199 | gam = parmDict[gamName] |
---|
1200 | dsdX = dsdY = 0 |
---|
1201 | else: |
---|
1202 | gam = G2mth.getTOFgamma(parmDict,dsp) |
---|
1203 | dsdX,dsdY = G2mth.getTOFgammaDeriv(dsp) |
---|
1204 | gam = max(gam,0.001) #avoid neg gamma |
---|
1205 | Wd,fmin,fmax = getWidthsTOF(pos,alp,bet,sig,gam) |
---|
1206 | iBeg = np.searchsorted(xdata,pos-fmin) |
---|
1207 | lenX = len(xdata) |
---|
1208 | if not iBeg: |
---|
1209 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1210 | elif iBeg == lenX: |
---|
1211 | iFin = iBeg |
---|
1212 | else: |
---|
1213 | iFin = np.searchsorted(xdata,pos+fmax) |
---|
1214 | if not iBeg+iFin: #peak below low limit |
---|
1215 | iPeak += 1 |
---|
1216 | continue |
---|
1217 | elif not iBeg-iFin: #peak above high limit |
---|
1218 | break |
---|
1219 | dMdpk = np.zeros(shape=(7,len(xdata))) |
---|
1220 | dMdipk = getdEpsVoigt(pos,alp,bet,sig,gam,xdata[iBeg:iFin]) |
---|
1221 | for i in range(1,6): |
---|
1222 | dMdpk[i][iBeg:iFin] += intens*cw[iBeg:iFin]*dMdipk[i] |
---|
1223 | dMdpk[0][iBeg:iFin] += cw[iBeg:iFin]*dMdipk[0] |
---|
1224 | dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'alp':dMdpk[2],'bet':dMdpk[3],'sig':dMdpk[4],'gam':dMdpk[5]} |
---|
1225 | for parmName in ['pos','int','alp','bet','sig','gam']: |
---|
1226 | try: |
---|
1227 | idx = varyList.index(parmName+str(iPeak)) |
---|
1228 | dMdv[idx] = dervDict[parmName] |
---|
1229 | except ValueError: |
---|
1230 | pass |
---|
1231 | if 'alpha' in varyList: |
---|
1232 | dMdv[varyList.index('alpha')] += dada0*dervDict['alp'] |
---|
1233 | if 'beta-0' in varyList: |
---|
1234 | dMdv[varyList.index('beta-0')] += dbdb0*dervDict['bet'] |
---|
1235 | if 'beta-1' in varyList: |
---|
1236 | dMdv[varyList.index('beta-1')] += dbdb1*dervDict['bet'] |
---|
1237 | if 'beta-q' in varyList: |
---|
1238 | dMdv[varyList.index('beta-q')] += dbdb2*dervDict['bet'] |
---|
1239 | if 'sig-0' in varyList: |
---|
1240 | dMdv[varyList.index('sig-0')] += dsds0*dervDict['sig'] |
---|
1241 | if 'sig-1' in varyList: |
---|
1242 | dMdv[varyList.index('sig-1')] += dsds1*dervDict['sig'] |
---|
1243 | if 'sig-2' in varyList: |
---|
1244 | dMdv[varyList.index('sig-2')] += dsds2*dervDict['sig'] |
---|
1245 | if 'sig-q' in varyList: |
---|
1246 | dMdv[varyList.index('sig-q')] += dsds3*dervDict['sig'] |
---|
1247 | if 'X' in varyList: |
---|
1248 | dMdv[varyList.index('X')] += dsdX*dervDict['gam'] |
---|
1249 | if 'Y' in varyList: |
---|
1250 | dMdv[varyList.index('Y')] += dsdY*dervDict['gam'] #problem here |
---|
1251 | iPeak += 1 |
---|
1252 | except KeyError: #no more peaks to process |
---|
1253 | break |
---|
1254 | return dMdv |
---|
1255 | |
---|
1256 | def Dict2Values(parmdict, varylist): |
---|
1257 | '''Use before call to leastsq to setup list of values for the parameters |
---|
1258 | in parmdict, as selected by key in varylist''' |
---|
1259 | return [parmdict[key] for key in varylist] |
---|
1260 | |
---|
1261 | def Values2Dict(parmdict, varylist, values): |
---|
1262 | ''' Use after call to leastsq to update the parameter dictionary with |
---|
1263 | values corresponding to keys in varylist''' |
---|
1264 | parmdict.update(zip(varylist,values)) |
---|
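# Minimal round-trip sketch (hypothetical names): with parmdict = {'U':1.,'V':-1.,'W':0.5}
# and varylist = ['U','W'], Dict2Values gives [1.0, 0.5]; after leastsq returns refined
# values, Values2Dict(parmdict,varylist,newvalues) writes them back into parmdict.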
1265 | |
---|
1266 | def SetBackgroundParms(Background): |
---|
1267 | 'needs a doc string' |
---|
1268 | if len(Background) == 1: # fix up old backgrounds |
---|
        Background.append({'nDebye':0,'debyeTerms':[]})
1270 | bakType,bakFlag = Background[0][:2] |
---|
1271 | backVals = Background[0][3:] |
---|
1272 | backNames = ['Back;'+str(i) for i in range(len(backVals))] |
---|
1273 | Debye = Background[1] #also has background peaks stuff |
---|
1274 | backDict = dict(zip(backNames,backVals)) |
---|
1275 | backVary = [] |
---|
1276 | if bakFlag: |
---|
1277 | backVary = backNames |
---|
1278 | |
---|
1279 | backDict['nDebye'] = Debye['nDebye'] |
---|
1280 | debyeDict = {} |
---|
1281 | debyeList = [] |
---|
1282 | for i in range(Debye['nDebye']): |
---|
1283 | debyeNames = ['DebyeA;'+str(i),'DebyeR;'+str(i),'DebyeU;'+str(i)] |
---|
1284 | debyeDict.update(dict(zip(debyeNames,Debye['debyeTerms'][i][::2]))) |
---|
1285 | debyeList += zip(debyeNames,Debye['debyeTerms'][i][1::2]) |
---|
1286 | debyeVary = [] |
---|
1287 | for item in debyeList: |
---|
1288 | if item[1]: |
---|
1289 | debyeVary.append(item[0]) |
---|
1290 | backDict.update(debyeDict) |
---|
1291 | backVary += debyeVary |
---|
1292 | |
---|
1293 | backDict['nPeaks'] = Debye['nPeaks'] |
---|
1294 | peaksDict = {} |
---|
1295 | peaksList = [] |
---|
1296 | for i in range(Debye['nPeaks']): |
---|
1297 | peaksNames = ['BkPkpos;'+str(i),'BkPkint;'+str(i),'BkPksig;'+str(i),'BkPkgam;'+str(i)] |
---|
1298 | peaksDict.update(dict(zip(peaksNames,Debye['peaksList'][i][::2]))) |
---|
1299 | peaksList += zip(peaksNames,Debye['peaksList'][i][1::2]) |
---|
1300 | peaksVary = [] |
---|
1301 | for item in peaksList: |
---|
1302 | if item[1]: |
---|
1303 | peaksVary.append(item[0]) |
---|
1304 | backDict.update(peaksDict) |
---|
1305 | backVary += peaksVary |
---|
1306 | return bakType,backDict,backVary |
---|
1307 | |
---|
1308 | def DoCalibInst(IndexPeaks,Inst): |
---|
1309 | |
---|
1310 | def SetInstParms(): |
---|
1311 | dataType = Inst['Type'][0] |
---|
1312 | insVary = [] |
---|
1313 | insNames = [] |
---|
1314 | insVals = [] |
---|
1315 | for parm in Inst: |
---|
1316 | insNames.append(parm) |
---|
1317 | insVals.append(Inst[parm][1]) |
---|
1318 | if parm in ['Lam','difC','difA','difB','Zero',]: |
---|
1319 | if Inst[parm][2]: |
---|
1320 | insVary.append(parm) |
---|
1321 | instDict = dict(zip(insNames,insVals)) |
---|
1322 | return dataType,instDict,insVary |
---|
1323 | |
---|
1324 | def GetInstParms(parmDict,Inst,varyList): |
---|
1325 | for name in Inst: |
---|
1326 | Inst[name][1] = parmDict[name] |
---|
1327 | |
---|
1328 | def InstPrint(Inst,sigDict): |
---|
1329 | print 'Instrument Parameters:' |
---|
1330 | if 'C' in Inst['Type'][0]: |
---|
1331 | ptfmt = "%12.6f" |
---|
1332 | else: |
---|
1333 | ptfmt = "%12.3f" |
---|
1334 | ptlbls = 'names :' |
---|
1335 | ptstr = 'values:' |
---|
1336 | sigstr = 'esds :' |
---|
1337 | for parm in Inst: |
---|
1338 | if parm in ['Lam','difC','difA','difB','Zero',]: |
---|
1339 | ptlbls += "%s" % (parm.center(12)) |
---|
1340 | ptstr += ptfmt % (Inst[parm][1]) |
---|
1341 | if parm in sigDict: |
---|
1342 | sigstr += ptfmt % (sigDict[parm]) |
---|
1343 | else: |
---|
1344 | sigstr += 12*' ' |
---|
1345 | print ptlbls |
---|
1346 | print ptstr |
---|
1347 | print sigstr |
---|
1348 | |
---|
1349 | def errPeakPos(values,peakDsp,peakPos,peakWt,dataType,parmDict,varyList): |
---|
1350 | parmDict.update(zip(varyList,values)) |
---|
1351 | return np.sqrt(peakWt)*(G2lat.getPeakPos(dataType,parmDict,peakDsp)-peakPos) |
---|
1352 | |
---|
1353 | peakPos = [] |
---|
1354 | peakDsp = [] |
---|
1355 | peakWt = [] |
---|
1356 | for peak,sig in zip(IndexPeaks[0],IndexPeaks[1]): |
---|
1357 | if peak[2] and peak[3] and sig > 0.: |
---|
1358 | peakPos.append(peak[0]) |
---|
1359 | peakDsp.append(peak[-1]) #d-calc |
---|
1360 | peakWt.append(peak[-1]**2/sig**2) #weight by d**2 |
---|
1361 | peakPos = np.array(peakPos) |
---|
1362 | peakDsp = np.array(peakDsp) |
---|
1363 | peakWt = np.array(peakWt) |
---|
1364 | dataType,insDict,insVary = SetInstParms() |
---|
1365 | parmDict = {} |
---|
1366 | parmDict.update(insDict) |
---|
1367 | varyList = insVary |
---|
1368 | if not len(varyList): |
---|
1369 | print '**** ERROR - nothing to refine! ****' |
---|
1370 | return False |
---|
1371 | while True: |
---|
1372 | begin = time.time() |
---|
1373 | values = np.array(Dict2Values(parmDict, varyList)) |
---|
1374 | result = so.leastsq(errPeakPos,values,full_output=True,ftol=0.000001, |
---|
1375 | args=(peakDsp,peakPos,peakWt,dataType,parmDict,varyList)) |
---|
1376 | ncyc = int(result[2]['nfev']/2) |
---|
1377 | runtime = time.time()-begin |
---|
1378 | chisq = np.sum(result[2]['fvec']**2) |
---|
1379 | Values2Dict(parmDict, varyList, result[0]) |
---|
1380 | GOF = chisq/(len(peakPos)-len(varyList)) #reduced chi^2 |
---|
1381 | print 'Number of function calls:',result[2]['nfev'],' Number of observations: ',len(peakPos),' Number of parameters: ',len(varyList) |
---|
1382 | print 'calib time = %8.3fs, %8.3fs/cycle'%(runtime,runtime/ncyc) |
---|
1383 | print 'chi**2 = %12.6g, reduced chi**2 = %6.2f'%(chisq,GOF) |
---|
1384 | try: |
---|
1385 | sig = np.sqrt(np.diag(result[1])*GOF) |
---|
1386 | if np.any(np.isnan(sig)): |
---|
1387 | print '*** Least squares aborted - some invalid esds possible ***' |
---|
1388 | break #refinement succeeded - finish up! |
---|
1389 | except ValueError: #result[1] is None on singular matrix |
---|
1390 | print '**** Refinement failed - singular matrix ****' |
---|
1391 | return False #give up rather than loop forever on the same singular problem |
---|
1392 | sigDict = dict(zip(varyList,sig)) |
---|
1393 | GetInstParms(parmDict,Inst,varyList) |
---|
1394 | InstPrint(Inst,sigDict) |
---|
1395 | return True |
---|
1396 | |
---|
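# Minimal sketch (added; not GSAS-II code) of the weighted least-squares idea used in
# DoCalibInst above: adjust instrument constants so positions computed from the calculated
# d-spacings match the observed peak positions, each residual weighted by sqrt(peakWt).
# A toy linear TOF model (pos = difC*d + Zero) stands in for G2lat.getPeakPos, and the
# function name and numbers are made up.
def _demoCalibFit():
    dCalc = np.array([2.0,1.5,1.2,1.0,0.8])                         #calculated d-spacings
    posObs = 5000.*dCalc+12.+np.array([0.3,-0.2,0.1,-0.1,0.2])      #noisy observed positions
    wt = dCalc**2                                                   #weight by d**2, as above
    def err(vals):
        difC,Zero = vals
        return np.sqrt(wt)*((difC*dCalc+Zero)-posObs)
    result = so.leastsq(err,[4900.,0.],full_output=True)
    difC,Zero = result[0]
    return difC,Zero        #should come back near 5000 and 12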
1397 | def DoPeakFit(FitPgm,Peaks,Background,Limits,Inst,Inst2,data,prevVaryList=[],oneCycle=False,controls=None,dlg=None): |
---|
1398 | '''Called to perform a peak fit, refining the selected items in the peak |
---|
1399 | table as well as selected items in the background. |
---|
1400 | |
---|
1401 | :param str FitPgm: type of fit to perform. At present this is ignored. |
---|
1402 | :param list Peaks: a list of peaks. Each peak entry is a list with 8 values: |
---|
1403 | four (value, refine flag) pairs, where the values are: position, intensity, |
---|
1404 | sigma (Gaussian width) and gamma (Lorentzian width). From the Histogram/"Peak List" |
---|
1405 | tree entry, dict item "peaks" |
---|
1406 | :param list Background: describes the background. List with two items. |
---|
1407 | Item 0 specifies a background model and coefficients. Item 1 is a dict. |
---|
1408 | From the Histogram/Background tree entry. |
---|
1409 | :param list Limits: min and max x-value to use |
---|
1410 | :param dict Inst: Instrument parameters |
---|
1411 | :param dict Inst2: more Instrument parameters |
---|
1412 | :param numpy.array data: a 6xn array. data[0] is the x-values, |
---|
1413 | data[1] is the y-values, data[2] are weight values, data[3], [4] and [5] are |
---|
1414 | calc, background and difference intensities, respectively. |
---|
1415 | :param list prevVaryList: Used in sequential refinements to override the |
---|
1416 | variable list. Defaults to an empty list. |
---|
1417 | :param bool oneCycle: True if only one cycle of fitting should be performed |
---|
1418 | :param dict controls: a dict specifying two values: Ftol = controls['min dM/M'] |
---|
1419 | and derivType = controls['deriv type']. If None, default values are used. |
---|
1420 | :param wx.Dialog dlg: A dialog box that is updated with progress from the fit. |
---|
1421 | Defaults to None, which means no updates are done. |
---|
1422 | ''' |
---|
1423 | def GetBackgroundParms(parmList,Background): |
---|
1424 | iBak = 0 |
---|
1425 | while True: |
---|
1426 | try: |
---|
1427 | bakName = 'Back;'+str(iBak) |
---|
1428 | Background[0][iBak+3] = parmList[bakName] |
---|
1429 | iBak += 1 |
---|
1430 | except KeyError: |
---|
1431 | break |
---|
1432 | iDb = 0 |
---|
1433 | while True: |
---|
1434 | names = ['DebyeA;','DebyeR;','DebyeU;'] |
---|
1435 | try: |
---|
1436 | for i,name in enumerate(names): |
---|
1437 | val = parmList[name+str(iDb)] |
---|
1438 | Background[1]['debyeTerms'][iDb][2*i] = val |
---|
1439 | iDb += 1 |
---|
1440 | except KeyError: |
---|
1441 | break |
---|
1442 | iDb = 0 |
---|
1443 | while True: |
---|
1444 | names = ['BkPkpos;','BkPkint;','BkPksig;','BkPkgam;'] |
---|
1445 | try: |
---|
1446 | for i,name in enumerate(names): |
---|
1447 | val = parmList[name+str(iDb)] |
---|
1448 | Background[1]['peaksList'][iDb][2*i] = val |
---|
1449 | iDb += 1 |
---|
1450 | except KeyError: |
---|
1451 | break |
---|
1452 | |
---|
1453 | def BackgroundPrint(Background,sigDict): |
---|
1454 | print 'Background coefficients for',Background[0][0],'function' |
---|
1455 | ptfmt = "%12.5f" |
---|
1456 | ptstr = 'value: ' |
---|
1457 | sigstr = 'esd : ' |
---|
1458 | for i,back in enumerate(Background[0][3:]): |
---|
1459 | ptstr += ptfmt % (back) |
---|
1460 | if Background[0][1]: |
---|
1461 | prm = 'Back;'+str(i) |
---|
1462 | if prm in sigDict: |
---|
1463 | sigstr += ptfmt % (sigDict[prm]) |
---|
1464 | else: |
---|
1465 | sigstr += " "*12 |
---|
1466 | if len(ptstr) > 75: |
---|
1467 | print ptstr |
---|
1468 | if Background[0][1]: print sigstr |
---|
1469 | ptstr = 'value: ' |
---|
1470 | sigstr = 'esd : ' |
---|
1471 | if len(ptstr) > 8: |
---|
1472 | print ptstr |
---|
1473 | if Background[0][1]: print sigstr |
---|
1474 | |
---|
1475 | if Background[1]['nDebye']: |
---|
1476 | parms = ['DebyeA;','DebyeR;','DebyeU;'] |
---|
1477 | print 'Debye diffuse scattering coefficients' |
---|
1478 | ptfmt = "%12.5f" |
---|
1479 | print ' term DebyeA esd DebyeR esd DebyeU esd' |
---|
1480 | for term in range(Background[1]['nDebye']): |
---|
1481 | line = ' term %d'%(term) |
---|
1482 | for ip,name in enumerate(parms): |
---|
1483 | line += ptfmt%(Background[1]['debyeTerms'][term][2*ip]) |
---|
1484 | if name+str(term) in sigDict: |
---|
1485 | line += ptfmt%(sigDict[name+str(term)]) |
---|
1486 | else: |
---|
1487 | line += " "*12 |
---|
1488 | print line |
---|
1489 | if Background[1]['nPeaks']: |
---|
1490 | print 'Coefficients for Background Peaks' |
---|
1491 | ptfmt = "%15.3f" |
---|
1492 | for j,pl in enumerate(Background[1]['peaksList']): |
---|
1493 | names = 'peak %3d:'%(j+1) |
---|
1494 | ptstr = 'values :' |
---|
1495 | sigstr = 'esds :' |
---|
1496 | for i,lbl in enumerate(['BkPkpos','BkPkint','BkPksig','BkPkgam']): |
---|
1497 | val = pl[2*i] |
---|
1498 | prm = lbl+";"+str(j) |
---|
1499 | names += '%15s'%(prm) |
---|
1500 | ptstr += ptfmt%(val) |
---|
1501 | if prm in sigDict: |
---|
1502 | sigstr += ptfmt%(sigDict[prm]) |
---|
1503 | else: |
---|
1504 | sigstr += " "*15 |
---|
1505 | print names |
---|
1506 | print ptstr |
---|
1507 | print sigstr |
---|
1508 | |
---|
1509 | def SetInstParms(Inst): |
---|
1510 | dataType = Inst['Type'][0] |
---|
1511 | insVary = [] |
---|
1512 | insNames = [] |
---|
1513 | insVals = [] |
---|
1514 | for parm in Inst: |
---|
1515 | insNames.append(parm) |
---|
1516 | insVals.append(Inst[parm][1]) |
---|
1517 | if parm in ['U','V','W','X','Y','SH/L','I(L2)/I(L1)','alpha', |
---|
1518 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',] and Inst[parm][2]: |
---|
1519 | insVary.append(parm) |
---|
1520 | instDict = dict(zip(insNames,insVals)) |
---|
1521 | instDict['X'] = max(instDict['X'],0.01) |
---|
1522 | instDict['Y'] = max(instDict['Y'],0.01) |
---|
1523 | if 'SH/L' in instDict: |
---|
1524 | instDict['SH/L'] = max(instDict['SH/L'],0.002) |
---|
1525 | return dataType,instDict,insVary |
---|
1526 | |
---|
1527 | def GetInstParms(parmDict,Inst,varyList,Peaks): |
---|
1528 | for name in Inst: |
---|
1529 | Inst[name][1] = parmDict[name] |
---|
1530 | iPeak = 0 |
---|
1531 | while True: |
---|
1532 | try: |
---|
1533 | sigName = 'sig'+str(iPeak) |
---|
1534 | pos = parmDict['pos'+str(iPeak)] |
---|
1535 | if sigName not in varyList: |
---|
1536 | if 'C' in Inst['Type'][0]: |
---|
1537 | parmDict[sigName] = G2mth.getCWsig(parmDict,pos) |
---|
1538 | else: |
---|
1539 | dsp = G2lat.Pos2dsp(Inst,pos) |
---|
1540 | parmDict[sigName] = G2mth.getTOFsig(parmDict,dsp) |
---|
1541 | gamName = 'gam'+str(iPeak) |
---|
1542 | if gamName not in varyList: |
---|
1543 | if 'C' in Inst['Type'][0]: |
---|
1544 | parmDict[gamName] = G2mth.getCWgam(parmDict,pos) |
---|
1545 | else: |
---|
1546 | dsp = G2lat.Pos2dsp(Inst,pos) |
---|
1547 | parmDict[gamName] = G2mth.getTOFgamma(parmDict,dsp) |
---|
1548 | iPeak += 1 |
---|
1549 | except KeyError: |
---|
1550 | break |
---|
1551 | |
---|
1552 | def InstPrint(Inst,sigDict): |
---|
1553 | print 'Instrument Parameters:' |
---|
1554 | ptfmt = "%12.6f" |
---|
1555 | ptlbls = 'names :' |
---|
1556 | ptstr = 'values:' |
---|
1557 | sigstr = 'esds :' |
---|
1558 | for parm in Inst: |
---|
1559 | if parm in ['U','V','W','X','Y','SH/L','I(L2)/I(L1)','alpha', |
---|
1560 | 'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',]: |
---|
1561 | ptlbls += "%s" % (parm.center(12)) |
---|
1562 | ptstr += ptfmt % (Inst[parm][1]) |
---|
1563 | if parm in sigDict: |
---|
1564 | sigstr += ptfmt % (sigDict[parm]) |
---|
1565 | else: |
---|
1566 | sigstr += 12*' ' |
---|
1567 | print ptlbls |
---|
1568 | print ptstr |
---|
1569 | print sigstr |
---|
1570 | |
---|
1571 | def SetPeaksParms(dataType,Peaks): |
---|
1572 | peakNames = [] |
---|
1573 | peakVary = [] |
---|
1574 | peakVals = [] |
---|
1575 | if 'C' in dataType: |
---|
1576 | names = ['pos','int','sig','gam'] |
---|
1577 | else: |
---|
1578 | names = ['pos','int','alp','bet','sig','gam'] |
---|
1579 | for i,peak in enumerate(Peaks): |
---|
1580 | for j,name in enumerate(names): |
---|
1581 | peakVals.append(peak[2*j]) |
---|
1582 | parName = name+str(i) |
---|
1583 | peakNames.append(parName) |
---|
1584 | if peak[2*j+1]: |
---|
1585 | peakVary.append(parName) |
---|
1586 | return dict(zip(peakNames,peakVals)),peakVary |
---|
1587 | |
---|
1588 | def GetPeaksParms(Inst,parmDict,Peaks,varyList): |
---|
1589 | if 'C' in Inst['Type'][0]: |
---|
1590 | names = ['pos','int','sig','gam'] |
---|
1591 | else: #'T' |
---|
1592 | names = ['pos','int','alp','bet','sig','gam'] |
---|
1593 | for i,peak in enumerate(Peaks): |
---|
1594 | pos = parmDict['pos'+str(i)] |
---|
1595 | if 'difC' in Inst: |
---|
1596 | dsp = pos/Inst['difC'][1] |
---|
1597 | for j in range(len(names)): |
---|
1598 | parName = names[j]+str(i) |
---|
1599 | if parName in varyList: |
---|
1600 | peak[2*j] = parmDict[parName] |
---|
1601 | elif 'alp' in parName: |
---|
1602 | peak[2*j] = parmDict['alpha']/dsp |
---|
1603 | elif 'bet' in parName: |
---|
1604 | peak[2*j] = G2mth.getTOFbeta(parmDict,dsp) |
---|
1605 | elif 'sig' in parName: |
---|
1606 | if 'C' in Inst['Type'][0]: |
---|
1607 | peak[2*j] = G2mth.getCWsig(parmDict,pos) |
---|
1608 | else: |
---|
1609 | peak[2*j] = G2mth.getTOFsig(parmDict,dsp) |
---|
1610 | elif 'gam' in parName: |
---|
1611 | if 'C' in Inst['Type'][0]: |
---|
1612 | peak[2*j] = G2mth.getCWgam(parmDict,pos) |
---|
1613 | else: |
---|
1614 | peak[2*j] = G2mth.getTOFgamma(parmDict,dsp) |
---|
1615 | |
---|
1616 | def PeaksPrint(dataType,parmDict,sigDict,varyList): |
---|
1617 | print 'Peak coefficients:' |
---|
1618 | if 'C' in dataType: |
---|
1619 | names = ['pos','int','sig','gam'] |
---|
1620 | else: #'T' |
---|
1621 | names = ['pos','int','alp','bet','sig','gam'] |
---|
1622 | head = 13*' ' |
---|
1623 | for name in names: |
---|
1624 | if name in ['alp','bet']: |
---|
1625 | head += name.center(8)+'esd'.center(8) |
---|
1626 | else: |
---|
1627 | head += name.center(10)+'esd'.center(10) |
---|
1628 | print head |
---|
1629 | if 'C' in dataType: |
---|
1630 | ptfmt = {'pos':"%10.5f",'int':"%10.1f",'sig':"%10.3f",'gam':"%10.3f"} |
---|
1631 | else: |
---|
1632 | ptfmt = {'pos':"%10.2f",'int':"%10.4f",'alp':"%8.3f",'bet':"%8.5f",'sig':"%10.3f",'gam':"%10.3f"} |
---|
1633 | for i,peak in enumerate(Peaks): |
---|
1634 | ptstr = ':' |
---|
1635 | for j in range(len(names)): |
---|
1636 | name = names[j] |
---|
1637 | parName = name+str(i) |
---|
1638 | ptstr += ptfmt[name] % (parmDict[parName]) |
---|
1639 | if parName in varyList: |
---|
1640 | ptstr += ptfmt[name] % (sigDict[parName]) |
---|
1641 | else: |
---|
1642 | if name in ['alp','bet']: |
---|
1643 | ptstr += 8*' ' |
---|
1644 | else: |
---|
1645 | ptstr += 10*' ' |
---|
1646 | print '%s'%(('Peak'+str(i+1)).center(8)),ptstr |
---|
1647 | |
---|
1648 | def devPeakProfile(values,xdata,ydata, weights,dataType,parmdict,varylist,bakType,dlg): |
---|
1649 | parmdict.update(zip(varylist,values)) |
---|
1650 | return np.sqrt(weights)*getPeakProfileDerv(dataType,parmdict,xdata,varylist,bakType) |
---|
1651 | |
---|
1652 | def errPeakProfile(values,xdata,ydata,weights,dataType,parmdict,varylist,bakType,dlg): |
---|
1653 | parmdict.update(zip(varylist,values)) |
---|
1654 | M = np.sqrt(weights)*(getPeakProfile(dataType,parmdict,xdata,varylist,bakType)-ydata) |
---|
1655 | Rwp = min(100.,np.sqrt(np.sum(M**2)/np.sum(weights*ydata**2))*100.) |
---|
1656 | if dlg: |
---|
1657 | GoOn = dlg.Update(Rwp,newmsg='%s%8.3f%s'%('Peak fit Rwp =',Rwp,'%'))[0] |
---|
1658 | if not GoOn: |
---|
1659 | return -M #abort!! |
---|
1660 | return M |
---|
1661 | |
---|
1662 | if controls: |
---|
1663 | Ftol = controls['min dM/M'] |
---|
1664 | derivType = controls['deriv type'] |
---|
1665 | else: |
---|
1666 | Ftol = 0.0001 |
---|
1667 | derivType = 'analytic' |
---|
1668 | if oneCycle: |
---|
1669 | Ftol = 1.0 |
---|
1670 | x,y,w,yc,yb,yd = data #these are numpy arrays! |
---|
1671 | yc *= 0. #set calcd ones to zero |
---|
1672 | yb *= 0. |
---|
1673 | yd *= 0. |
---|
1674 | xBeg = np.searchsorted(x,Limits[0]) |
---|
1675 | xFin = np.searchsorted(x,Limits[1]) |
---|
1676 | bakType,bakDict,bakVary = SetBackgroundParms(Background) |
---|
1677 | dataType,insDict,insVary = SetInstParms(Inst) |
---|
1678 | peakDict,peakVary = SetPeaksParms(Inst['Type'][0],Peaks) |
---|
1679 | parmDict = {} |
---|
1680 | parmDict.update(bakDict) |
---|
1681 | parmDict.update(insDict) |
---|
1682 | parmDict.update(peakDict) |
---|
1683 | parmDict['Pdabc'] = [] #dummy Pdabc |
---|
1684 | parmDict.update(Inst2) #put in real one if there |
---|
1685 | if prevVaryList: |
---|
1686 | varyList = prevVaryList[:] |
---|
1687 | else: |
---|
1688 | varyList = bakVary+insVary+peakVary |
---|
1689 | fullvaryList = varyList[:] |
---|
1690 | while True: |
---|
1691 | begin = time.time() |
---|
1692 | values = np.array(Dict2Values(parmDict, varyList)) |
---|
1693 | Rvals = {} |
---|
1694 | badVary = [] |
---|
1695 | result = so.leastsq(errPeakProfile,values,Dfun=devPeakProfile,full_output=True,ftol=Ftol,col_deriv=True, |
---|
1696 | args=(x[xBeg:xFin],y[xBeg:xFin],w[xBeg:xFin],dataType,parmDict,varyList,bakType,dlg)) |
---|
1697 | ncyc = int(result[2]['nfev']/2) |
---|
1698 | runtime = time.time()-begin |
---|
1699 | chisq = np.sum(result[2]['fvec']**2) |
---|
1700 | Values2Dict(parmDict, varyList, result[0]) |
---|
1701 | Rvals['Rwp'] = np.sqrt(chisq/np.sum(w[xBeg:xFin]*y[xBeg:xFin]**2))*100. #to % |
---|
1702 | Rvals['GOF'] = chisq/(xFin-xBeg-len(varyList)) #reduced chi^2 |
---|
1703 | print 'Number of function calls:',result[2]['nfev'],' Number of observations: ',xFin-xBeg,' Number of parameters: ',len(varyList) |
---|
1704 | if ncyc: |
---|
1705 | print 'fitpeak time = %8.3fs, %8.3fs/cycle'%(runtime,runtime/ncyc) |
---|
1706 | print 'Rwp = %7.2f%%, chi**2 = %12.6g, reduced chi**2 = %6.2f'%(Rvals['Rwp'],chisq,Rvals['GOF']) |
---|
1707 | sig = [0]*len(varyList) |
---|
1708 | if len(varyList) == 0: break # if nothing was refined |
---|
1709 | try: |
---|
1710 | sig = np.sqrt(np.diag(result[1])*Rvals['GOF']) |
---|
1711 | if np.any(np.isnan(sig)): |
---|
1712 | print '*** Least squares aborted - some invalid esds possible ***' |
---|
1713 | break #refinement succeeded - finish up! |
---|
1714 | except ValueError: #result[1] is None on singular matrix |
---|
1715 | print '**** Refinement failed - singular matrix ****' |
---|
1716 | Ipvt = result[2]['ipvt'] |
---|
1717 | for i,ipvt in enumerate(Ipvt): |
---|
1718 | if not np.sum(result[2]['fjac'],axis=1)[i]: |
---|
1719 | print 'Removing parameter: ',varyList[ipvt-1] |
---|
1720 | badVary.append(varyList[ipvt-1]) |
---|
1721 | del(varyList[ipvt-1]) |
---|
1722 | break |
---|
1723 | else: # nothing removed |
---|
1724 | break |
---|
1725 | if dlg: dlg.Destroy() |
---|
1726 | sigDict = dict(zip(varyList,sig)) |
---|
1727 | yb[xBeg:xFin] = getBackground('',parmDict,bakType,dataType,x[xBeg:xFin])[0] |
---|
1728 | yc[xBeg:xFin] = getPeakProfile(dataType,parmDict,x[xBeg:xFin],varyList,bakType) |
---|
1729 | yd[xBeg:xFin] = y[xBeg:xFin]-yc[xBeg:xFin] |
---|
1730 | GetBackgroundParms(parmDict,Background) |
---|
1731 | if bakVary: BackgroundPrint(Background,sigDict) |
---|
1732 | GetInstParms(parmDict,Inst,varyList,Peaks) |
---|
1733 | if insVary: InstPrint(Inst,sigDict) |
---|
1734 | GetPeaksParms(Inst,parmDict,Peaks,varyList) |
---|
1735 | if peakVary: PeaksPrint(dataType,parmDict,sigDict,varyList) |
---|
1736 | return sigDict,result,sig,Rvals,varyList,parmDict,fullvaryList,badVary |
---|
1737 | |
---|
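# Hedged helper sketch (added for illustration): the agreement statistics printed by
# DoPeakFit above.  Rwp is the weighted profile R-factor in percent and GOF the reduced
# chi-squared, computed exactly as in the fit loop; inputs are plain numpy arrays over
# the fitted region and nVary is the number of refined parameters.
def _profileStats(yObs,yCalc,weights,nVary):
    chisq = np.sum(weights*(yObs-yCalc)**2)
    Rwp = 100.*np.sqrt(chisq/np.sum(weights*yObs**2))
    GOF = chisq/(len(yObs)-nVary)
    return Rwp,GOF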
1738 | def calcIncident(Iparm,xdata): |
---|
1739 | '''Compute the incident intensity spectrum YI and its variance WYI at each point in xdata from the instrument Iparm coefficients and their covariance''' |
---|
1740 | |
---|
1741 | def IfunAdv(Iparm,xdata): |
---|
1742 | Itype = Iparm['Itype'] |
---|
1743 | Icoef = Iparm['Icoeff'] |
---|
1744 | DYI = np.ones((12,xdata.shape[0])) |
---|
1745 | YI = np.ones_like(xdata)*Icoef[0] |
---|
1746 | |
---|
1747 | x = xdata/1000. #expressions are in ms |
---|
1748 | if Itype == 'Exponential': |
---|
1749 | for i in [1,3,5,7,9]: |
---|
1750 | Eterm = np.exp(-Icoef[i+1]*x**((i+1)/2)) |
---|
1751 | YI += Icoef[i]*Eterm |
---|
1752 | DYI[i] *= Eterm |
---|
1753 | DYI[i+1] *= -Icoef[i]*Eterm*x**((i+1)/2) |
---|
1754 | elif 'Maxwell' in Itype: |
---|
1755 | Eterm = np.exp(-Icoef[2]/x**2) |
---|
1756 | DYI[1] = Eterm/x**5 |
---|
1757 | DYI[2] = -Icoef[1]*DYI[1]/x**2 |
---|
1758 | YI += (Icoef[1]*Eterm/x**5) |
---|
1759 | if 'Exponential' in Itype: |
---|
1760 | for i in range(3,11,2): |
---|
1761 | Eterm = np.exp(-Icoef[i+1]*x**((i+1)/2)) |
---|
1762 | YI += Icoef[i]*Eterm |
---|
1763 | DYI[i] *= Eterm |
---|
1764 | DYI[i+1] *= -Icoef[i]*Eterm*x**((i+1)/2) |
---|
1765 | else: #Chebyschev |
---|
1766 | T = (2./x)-1. |
---|
1767 | Ccof = np.ones((12,xdata.shape[0])) |
---|
1768 | Ccof[1] = T |
---|
1769 | for i in range(2,12): |
---|
1770 | Ccof[i] = 2*T*Ccof[i-1]-Ccof[i-2] |
---|
1771 | for i in range(1,10): |
---|
1772 | YI += Ccof[i]*Icoef[i+2] |
---|
1773 | DYI[i+2] = Ccof[i] |
---|
1774 | return YI,DYI |
---|
1775 | |
---|
1776 | Iesd = np.array(Iparm['Iesd']) |
---|
1777 | Icovar = Iparm['Icovar'] |
---|
1778 | YI,DYI = IfunAdv(Iparm,xdata) |
---|
1779 | YI = np.where(YI>0,YI,1.) |
---|
1780 | WYI = np.zeros_like(xdata) |
---|
1781 | vcov = np.zeros((12,12)) |
---|
1782 | k = 0 |
---|
1783 | for i in range(12): |
---|
1784 | for j in range(i,12): |
---|
1785 | vcov[i][j] = Icovar[k]*Iesd[i]*Iesd[j] |
---|
1786 | vcov[j][i] = Icovar[k]*Iesd[i]*Iesd[j] |
---|
1787 | k += 1 |
---|
1788 | M = np.inner(vcov,DYI.T) |
---|
1789 | WYI = np.sum(M*DYI,axis=0) |
---|
1790 | WYI = np.where(WYI>0.,WYI,0.) |
---|
1791 | return YI,WYI |
---|
1792 | |
---|
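# Illustrative sketch (added): the variance propagation used in calcIncident above.  Given
# the derivative matrix DYI (one row per coefficient, one column per point) and the
# coefficient variance-covariance matrix vcov, the variance of the incident intensity at
# each point is the quadratic form DYI[:,k].T * vcov * DYI[:,k], which is what
# np.sum(np.inner(vcov,DYI.T)*DYI,axis=0) evaluates.  The shapes and values are toy ones.
def _demoIncidentVariance():
    npts = 5
    DYI = np.ones((12,npts))                #d(YI)/d(coeff) at each point
    vcov = np.eye(12)*0.01                  #toy var-covar matrix of the coefficients
    WYI = np.sum(np.inner(vcov,DYI.T)*DYI,axis=0)
    # equivalent, point by point:
    WYIcheck = np.array([np.dot(DYI[:,i],np.dot(vcov,DYI[:,i])) for i in range(npts)])
    return np.allclose(WYI,WYIcheck)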
1793 | ################################################################################ |
---|
1794 | # Stacking fault simulation codes |
---|
1795 | ################################################################################ |
---|
1796 | |
---|
1797 | def GetStackParms(Layers): |
---|
1798 | '''Build a list of refinable parameter names (cell and layer transition terms) for the stacking fault model in Layers''' |
---|
1799 | Parms = [] |
---|
1800 | #cell parms |
---|
1801 | cell = Layers['Cell'] |
---|
1802 | if Layers['Laue'] in ['-3','-3m','4/m','4/mmm','6/m','6/mmm']: |
---|
1803 | Parms.append('cellA') |
---|
1804 | Parms.append('cellC') |
---|
1805 | else: |
---|
1806 | Parms.append('cellA') |
---|
1807 | Parms.append('cellB') |
---|
1808 | Parms.append('cellC') |
---|
1809 | if Layers['Laue'] != 'mmm': |
---|
1810 | Parms.append('cellG') |
---|
1811 | #Transition parms |
---|
1812 | Trans = Layers['Transitions'] |
---|
1813 | for iY in range(len(Layers['Layers'])): |
---|
1814 | for iX in range(len(Layers['Layers'])): |
---|
1815 | Parms.append('TransP;%d;%d'%(iY,iX)) |
---|
1816 | Parms.append('TransX;%d;%d'%(iY,iX)) |
---|
1817 | Parms.append('TransY;%d;%d'%(iY,iX)) |
---|
1818 | Parms.append('TransZ;%d;%d'%(iY,iX)) |
---|
1819 | return Parms |
---|
1820 | |
---|
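# Hedged example (added): for a hypothetical two-layer model with Laue class '-3m' the
# list returned by GetStackParms is the symmetry-allowed cell parameters followed by one
# probability/shift-vector block per ordered pair of layers.
def _demoStackParmNames():
    Layers = {'Laue':'-3m','Cell':[False,1.,1.,1.,90.,90.,120.,1.],
        'Layers':[{'Name':'A'},{'Name':'B'}],'Transitions':[[None,None],[None,None]]}
    return GetStackParms(Layers)
    # -> ['cellA','cellC',
    #     'TransP;0;0','TransX;0;0','TransY;0;0','TransZ;0;0', ... ,'TransZ;1;1']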
1821 | def StackSim(Layers,ctrls,scale=0.,background={},limits=[],inst={},profile=[]): |
---|
1822 | '''Simulate powder or selected area diffraction pattern from stacking faults using DIFFaX |
---|
1823 | |
---|
1824 | :param dict Layers: layer description, e.g. {'Laue':'-1','Cell':[False,1.,1.,1.,90.,90.,90.,1.], |
---|
1825 | 'Width':[[10.,10.],[False,False]],'Toler':0.01,'AtInfo':{}, |
---|
1826 | 'Layers':[],'Stacking':[],'Transitions':[]} |
---|
1827 | :param str ctrls: controls string to be written to the DIFFaX controls.dif file |
---|
1828 | :param float scale: scale factor |
---|
1829 | :param dict background: background parameters |
---|
1830 | :param list limits: min/max 2-theta to be calculated |
---|
1831 | :param dict inst: instrument parameters dictionary |
---|
1832 | :param list profile: powder pattern data |
---|
1833 | |
---|
1834 | Note that all the arguments are updated in place. |
---|
1835 | ''' |
---|
1836 | import atmdata |
---|
1837 | path = sys.path |
---|
1838 | for name in path: |
---|
1839 | if 'bin' in name: |
---|
1840 | DIFFaX = name+'/DIFFaX.exe' |
---|
1841 | print ' Execute ',DIFFaX |
---|
1842 | break |
---|
1843 | # make form factor file that DIFFaX wants - atom types are GSASII style |
---|
1844 | sf = open('data.sfc','w') |
---|
1845 | sf.write('GSASII special form factor file for DIFFaX\n\n') |
---|
1846 | atTypes = Layers['AtInfo'].keys() |
---|
1847 | if 'H' not in atTypes: |
---|
1848 | atTypes.insert(0,'H') |
---|
1849 | for atType in atTypes: |
---|
1850 | if atType == 'H': |
---|
1851 | blen = -.3741 |
---|
1852 | else: |
---|
1853 | blen = Layers['AtInfo'][atType]['Isotopes']['Nat. Abund.']['SL'][0] |
---|
1854 | Adat = atmdata.XrayFF[atType] |
---|
1855 | text = '%4s'%(atType.ljust(4)) |
---|
1856 | for i in range(4): |
---|
1857 | text += '%11.6f%11.6f'%(Adat['fa'][i],Adat['fb'][i]) |
---|
1858 | text += '%11.6f%11.6f'%(Adat['fc'],blen) |
---|
1859 | text += '%3d\n'%(Adat['Z']) |
---|
1860 | sf.write(text) |
---|
1861 | sf.close() |
---|
1862 | #make DIFFaX control.dif file - future use GUI to set some of these flags |
---|
1863 | cf = open('control.dif','w') |
---|
1864 | if ctrls == '0\n0\n3\n' or ctrls == '0\n1\n3\n': |
---|
1865 | x0 = profile[0] |
---|
1866 | iBeg = np.searchsorted(x0,limits[0]) |
---|
1867 | iFin = np.searchsorted(x0,limits[1]) |
---|
1868 | if iFin-iBeg > 20000: |
---|
1869 | iFin = iBeg+20000 |
---|
1870 | Dx = (x0[iFin]-x0[iBeg])/(iFin-iBeg) |
---|
1871 | cf.write('GSASII-DIFFaX.dat\n'+ctrls) |
---|
1872 | cf.write('%.6f %.6f %.6f\n1\n1\nend\n'%(x0[iBeg],x0[iFin],Dx)) |
---|
1873 | else: |
---|
1874 | cf.write('GSASII-DIFFaX.dat\n'+ctrls) |
---|
1875 | inst = {'Type':['XSC','XSC',]} |
---|
1876 | cf.close() |
---|
1877 | #make DIFFaX data file |
---|
1878 | df = open('GSASII-DIFFaX.dat','w') |
---|
1879 | df.write('INSTRUMENTAL\n') |
---|
1880 | if 'X' in inst['Type'][0]: |
---|
1881 | df.write('X-RAY\n') |
---|
1882 | elif 'N' in inst['Type'][0]: |
---|
1883 | df.write('NEUTRON\n') |
---|
1884 | if ctrls == '0\n0\n3\n' or ctrls == '0\n1\n3\n': |
---|
1885 | df.write('%.4f\n'%(G2mth.getMeanWave(inst))) |
---|
1886 | U = ateln2*inst['U'][1]/10000. |
---|
1887 | V = ateln2*inst['V'][1]/10000. |
---|
1888 | W = ateln2*inst['W'][1]/10000. |
---|
1889 | HWHM = U*nptand(x0[iBeg:iFin]/2.)**2+V*nptand(x0[iBeg:iFin]/2.)+W |
---|
1890 | HW = np.sqrt(np.mean(HWHM)) |
---|
1891 | # df.write('PSEUDO-VOIGT 0.015 -0.0036 0.009 0.605 TRIM\n') |
---|
1892 | if 'Mean' in Layers['selInst']: |
---|
1893 | df.write('GAUSSIAN %.6f TRIM\n'%(HW)) #fast option - might not really matter |
---|
1894 | elif 'Gaussian' in Layers['selInst']: |
---|
1895 | df.write('GAUSSIAN %.6f %.6f %.6f TRIM\n'%(U,V,W)) #slow - make a GUI option? |
---|
1896 | else: |
---|
1897 | df.write('None\n') |
---|
1898 | else: |
---|
1899 | df.write('0.10\nNone\n') |
---|
1900 | df.write('STRUCTURAL\n') |
---|
1901 | a,b,c = Layers['Cell'][1:4] |
---|
1902 | gam = Layers['Cell'][6] |
---|
1903 | df.write('%.4f %.4f %.4f %.3f\n'%(a,b,c,gam)) |
---|
1904 | laue = Layers['Laue'] |
---|
1905 | if laue == '2/m(ab)': |
---|
1906 | laue = '2/m(1)' |
---|
1907 | elif laue == '2/m(c)': |
---|
1908 | laue = '2/m(2)' |
---|
1909 | if 'unknown' in Layers['Laue']: |
---|
1910 | df.write('%s %.3f\n'%(laue,Layers['Toler'])) |
---|
1911 | else: |
---|
1912 | df.write('%s\n'%(laue)) |
---|
1913 | df.write('%d\n'%(len(Layers['Layers']))) |
---|
1914 | if Layers['Width'][0][0] < 1. or Layers['Width'][0][1] < 1.: |
---|
1915 | df.write('%.1f %.1f\n'%(Layers['Width'][0][0]*10000.,Layers['Width'][0][1]*10000.)) #mum to A |
---|
1916 | layerNames = [] |
---|
1917 | for layer in Layers['Layers']: |
---|
1918 | layerNames.append(layer['Name']) |
---|
1919 | for il,layer in enumerate(Layers['Layers']): |
---|
1920 | if layer['SameAs']: |
---|
1921 | df.write('LAYER %d = %d\n'%(il+1,layerNames.index(layer['SameAs'])+1)) |
---|
1922 | continue |
---|
1923 | df.write('LAYER %d\n'%(il+1)) |
---|
1924 | if '-1' in layer['Symm']: |
---|
1925 | df.write('CENTROSYMMETRIC\n') |
---|
1926 | else: |
---|
1927 | df.write('NONE\n') |
---|
1928 | for ia,atom in enumerate(layer['Atoms']): |
---|
1929 | [name,atype,x,y,z,frac,Uiso] = atom |
---|
1930 | if '-1' in layer['Symm'] and [x,y,z] == [0.,0.,0.]: |
---|
1931 | frac /= 2. |
---|
1932 | df.write('%4s %3d %.5f %.5f %.5f %.4f %.2f\n'%(atype.ljust(6),ia,x,y,z,78.9568*Uiso,frac)) |
---|
1933 | df.write('STACKING\n') |
---|
1934 | df.write('%s\n'%(Layers['Stacking'][0])) |
---|
1935 | if 'recursive' in Layers['Stacking'][0]: |
---|
1936 | df.write('%s\n'%Layers['Stacking'][1]) |
---|
1937 | else: |
---|
1938 | if 'list' in Layers['Stacking'][1]: |
---|
1939 | Slen = len(Layers['Stacking'][2]) |
---|
1940 | iB = 0 |
---|
1941 | iF = 0 |
---|
1942 | while True: |
---|
1943 | iF += 68 |
---|
1944 | iF = min(iF,Slen) |
---|
1945 | df.write('%s\n'%(Layers['Stacking'][2][iB:iF])) |
---|
1946 | iB = iF |
---|
1947 | if iF >= Slen: #last (possibly partial) chunk written; done |
---|
1948 | break |
---|
1949 | else: |
---|
1950 | df.write('%s\n'%Layers['Stacking'][1]) |
---|
1951 | df.write('TRANSITIONS\n') |
---|
1952 | for iY in range(len(Layers['Layers'])): |
---|
1953 | sumPx = 0. |
---|
1954 | for iX in range(len(Layers['Layers'])): |
---|
1955 | p,dx,dy,dz = Layers['Transitions'][iY][iX][:4] |
---|
1956 | p = round(p,3) |
---|
1957 | df.write('%.3f %.5f %.5f %.5f\n'%(p,dx,dy,dz)) |
---|
1958 | sumPx += p |
---|
1959 | if sumPx != 1.0: #this has to be picky since DIFFaX is. |
---|
1960 | print 'ERROR - Layer probabilities sum to ',sumPx,'; DIFFaX requires that they sum to 1.0' |
---|
1961 | df.close() |
---|
1962 | os.remove('data.sfc') |
---|
1963 | os.remove('control.dif') |
---|
1964 | os.remove('GSASII-DIFFaX.dat') |
---|
1965 | return |
---|
1966 | df.close() |
---|
1967 | time0 = time.time() |
---|
1968 | try: |
---|
1969 | subp.call(DIFFaX) |
---|
1970 | except OSError: |
---|
1971 | print ' DIFFaX.exe is not available for this platform - under development' |
---|
1972 | print ' DIFFaX time = %.2fs'%(time.time()-time0) |
---|
1973 | if os.path.exists('GSASII-DIFFaX.spc'): |
---|
1974 | Xpat = np.loadtxt('GSASII-DIFFaX.spc').T |
---|
1975 | iFin = iBeg+Xpat.shape[1] |
---|
1976 | bakType,backDict,backVary = SetBackgroundParms(background) |
---|
1977 | backDict['Lam1'] = G2mth.getWave(inst) |
---|
1978 | profile[4][iBeg:iFin] = getBackground('',backDict,bakType,inst['Type'][0],profile[0][iBeg:iFin])[0] |
---|
1979 | profile[3][iBeg:iFin] = Xpat[-1]*scale+profile[4][iBeg:iFin] |
---|
1980 | if not np.any(profile[1]): #fill dummy data x,y,w,yc,yb,yd |
---|
1981 | rv = st.poisson(profile[3][iBeg:iFin]) |
---|
1982 | profile[1][iBeg:iFin] = rv.rvs() |
---|
1983 | Z = np.ones_like(profile[3][iBeg:iFin]) |
---|
1984 | Z[1::2] *= -1 |
---|
1985 | profile[1][iBeg:iFin] = profile[3][iBeg:iFin]+np.abs(profile[1][iBeg:iFin]-profile[3][iBeg:iFin])*Z |
---|
1986 | profile[2][iBeg:iFin] = np.where(profile[1][iBeg:iFin]>0.,1./profile[1][iBeg:iFin],1.0) |
---|
1987 | profile[5][iBeg:iFin] = profile[1][iBeg:iFin]-profile[3][iBeg:iFin] |
---|
1988 | #cleanup files.. |
---|
1989 | os.remove('GSASII-DIFFaX.spc') |
---|
1990 | elif os.path.exists('GSASII-DIFFaX.sadp'): |
---|
1991 | Sadp = np.fromfile('GSASII-DIFFaX.sadp','>u2') |
---|
1992 | Sadp = np.reshape(Sadp,(256,-1)) |
---|
1993 | Layers['Sadp']['Img'] = Sadp |
---|
1994 | os.remove('GSASII-DIFFaX.sadp') |
---|
1995 | os.remove('data.sfc') |
---|
1996 | os.remove('control.dif') |
---|
1997 | os.remove('GSASII-DIFFaX.dat') |
---|
1998 | |
---|
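# Hedged sketch (added) of the dummy-data trick used in StackSim above and in
# CalcStackingPWDR below: when the pattern has no observed intensities, synthetic
# "observed" counts are drawn from a Poisson distribution about the calculated profile
# (which must be positive), the sign of the deviation is alternated point-to-point, and
# the weights are set to 1/y (or 1 where y <= 0).  The array is a stand-in for the
# profile columns.
def _demoDummyCounts(ycalc):
    yobs = st.poisson(ycalc).rvs()                      #Poisson noise about the model
    Z = np.ones_like(ycalc)
    Z[1::2] *= -1                                       #alternate the deviation sign
    yobs = ycalc+np.abs(yobs-ycalc)*Z
    w = np.where(yobs>0.,1./yobs,1.0)                   #counting-statistics weights
    return yobs,w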
1999 | def SetPWDRscan(inst,limits,profile): |
---|
2000 | '''Define the 2-theta scan (limited to 20000 steps) for the pydiffax powder calculation; returns the number of steps''' |
---|
2001 | wave = G2mth.getMeanWave(inst) |
---|
2002 | x0 = profile[0] |
---|
2003 | iBeg = np.searchsorted(x0,limits[0]) |
---|
2004 | iFin = np.searchsorted(x0,limits[1]) |
---|
2005 | if iFin-iBeg > 20000: |
---|
2006 | iFin = iBeg+20000 |
---|
2007 | Dx = (x0[iFin]-x0[iBeg])/(iFin-iBeg) |
---|
2008 | pyx.pygetinst(wave,x0[iBeg],x0[iFin],Dx) |
---|
2009 | return iFin-iBeg |
---|
2010 | |
---|
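# Small sketch (added): the pattern used in SetPWDRscan and elsewhere in this module to
# clip a calculation to the data limits - np.searchsorted finds the indices bracketing the
# limits in the (sorted) 2-theta array and the window is capped at 20000 steps for DIFFaX.
# The maxSteps parameter name is introduced here for illustration only.
def _demoLimitWindow(x0,limits,maxSteps=20000):
    iBeg = np.searchsorted(x0,limits[0])
    iFin = np.searchsorted(x0,limits[1])
    if iFin-iBeg > maxSteps:
        iFin = iBeg+maxSteps
    return iBeg,iFin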
2011 | def SetStackingSF(Layers,debug): |
---|
2012 | # Load scattering factors into DIFFaX arrays |
---|
2013 | import atmdata |
---|
2014 | atTypes = Layers['AtInfo'].keys() |
---|
2015 | aTypes = [] |
---|
2016 | for atype in atTypes: |
---|
2017 | aTypes.append('%4s'%(atype.ljust(4))) |
---|
2018 | SFdat = [] |
---|
2019 | for atType in atTypes: |
---|
2020 | if atType == 'H': |
---|
2021 | blen = -.3741 |
---|
2022 | else: |
---|
2023 | blen = Layers['AtInfo'][atType]['Isotopes']['Nat. Abund.']['SL'][0] |
---|
2024 | Adat = atmdata.XrayFF[atType] |
---|
2025 | SF = np.zeros(9) |
---|
2026 | SF[:8:2] = Adat['fa'] |
---|
2027 | SF[1:8:2] = Adat['fb'] |
---|
2028 | SF[8] = Adat['fc'] |
---|
2029 | SFdat.append(SF) |
---|
2030 | SFdat = np.array(SFdat) |
---|
2031 | pyx.pyloadscf(len(atTypes),aTypes,SFdat.T,debug) |
---|
2032 | |
---|
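# Hedged sketch (added): the 9 coefficients packed into each SFdat row above appear to
# follow the usual 4-Gaussian X-ray form factor parameterization; assuming
# s = sin(theta)/lambda, a consumer of that table could evaluate
#     f(s) = sum_i fa[i]*exp(-fb[i]*s**2) + fc
# as below.
def _demoFormFactor(SF,s):
    fa = SF[:8:2]
    fb = SF[1:8:2]
    fc = SF[8]
    return np.sum(fa*np.exp(-fb*s**2))+fc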
2033 | def SetStackingClay(Layers,Type): |
---|
2034 | # Controls |
---|
2035 | rand.seed() |
---|
2036 | ranSeed = rand.randint(1,2**16-1) |
---|
2037 | try: |
---|
2038 | laueId = ['-1','2/m(ab)','2/m(c)','mmm','-3','-3m','4/m','4/mmm', |
---|
2039 | '6/m','6/mmm'].index(Layers['Laue'])+1 |
---|
2040 | except ValueError: #for 'unknown' |
---|
2041 | laueId = -1 |
---|
2042 | if 'SADP' in Type: |
---|
2043 | planeId = ['h0l','0kl','hhl','h-hl'].index(Layers['Sadp']['Plane'])+1 |
---|
2044 | lmax = int(Layers['Sadp']['Lmax']) |
---|
2045 | else: |
---|
2046 | planeId = 0 |
---|
2047 | lmax = 0 |
---|
2048 | # Sequences |
---|
2049 | StkType = ['recursive','explicit'].index(Layers['Stacking'][0]) |
---|
2050 | try: |
---|
2051 | StkParm = ['infinite','random','list'].index(Layers['Stacking'][1]) |
---|
2052 | except ValueError: |
---|
2053 | StkParm = -1 |
---|
2054 | if StkParm == 2: #list |
---|
2055 | StkSeq = [int(val) for val in Layers['Stacking'][2].split()] |
---|
2056 | Nstk = len(StkSeq) |
---|
2057 | else: |
---|
2058 | Nstk = 1 |
---|
2059 | StkSeq = [0,] |
---|
2060 | if StkParm == -1: |
---|
2061 | StkParm = int(Layers['Stacking'][1]) |
---|
2062 | Wdth = Layers['Width'][0] |
---|
2063 | mult = 1 |
---|
2064 | controls = [laueId,planeId,lmax,mult,StkType,StkParm,ranSeed] |
---|
2065 | LaueSym = Layers['Laue'].ljust(12) |
---|
2066 | pyx.pygetclay(controls,LaueSym,Wdth,Nstk,StkSeq) |
---|
2067 | return laueId,controls |
---|
2068 | |
---|
2069 | def SetCellAtoms(Layers): |
---|
2070 | Cell = Layers['Cell'][1:4]+Layers['Cell'][6:7] |
---|
2071 | # atoms in layers |
---|
2072 | atTypes = Layers['AtInfo'].keys() |
---|
2073 | AtomXOU = [] |
---|
2074 | AtomTp = [] |
---|
2075 | LayerSymm = [] |
---|
2076 | LayerNum = [] |
---|
2077 | layerNames = [] |
---|
2078 | Natm = 0 |
---|
2079 | Nuniq = 0 |
---|
2080 | for layer in Layers['Layers']: |
---|
2081 | layerNames.append(layer['Name']) |
---|
2082 | for il,layer in enumerate(Layers['Layers']): |
---|
2083 | if layer['SameAs']: |
---|
2084 | LayerNum.append(layerNames.index(layer['SameAs'])+1) |
---|
2085 | continue |
---|
2086 | else: |
---|
2087 | LayerNum.append(il+1) |
---|
2088 | Nuniq += 1 |
---|
2089 | if '-1' in layer['Symm']: |
---|
2090 | LayerSymm.append(1) |
---|
2091 | else: |
---|
2092 | LayerSymm.append(0) |
---|
2093 | for ia,atom in enumerate(layer['Atoms']): |
---|
2094 | [name,atype,x,y,z,frac,Uiso] = atom |
---|
2095 | Natm += 1 |
---|
2096 | AtomTp.append('%4s'%(atype.ljust(4))) |
---|
2097 | Ta = atTypes.index(atype)+1 |
---|
2098 | AtomXOU.append([float(Nuniq),float(ia+1),float(Ta),x,y,z,frac,Uiso*78.9568]) |
---|
2099 | AtomXOU = np.array(AtomXOU) |
---|
2100 | Nlayers = len(layerNames) |
---|
2101 | pyx.pycellayer(Cell,Natm,AtomTp,AtomXOU.T,Nuniq,LayerSymm,Nlayers,LayerNum) |
---|
2102 | return Nlayers |
---|
2103 | |
---|
2104 | def SetStackingTrans(Layers,Nlayers): |
---|
2105 | # Transitions |
---|
2106 | TransX = [] |
---|
2107 | TransP = [] |
---|
2108 | for Ytrans in Layers['Transitions']: |
---|
2109 | TransP.append([trans[0] for trans in Ytrans]) #get just the numbers |
---|
2110 | TransX.append([trans[1:4] for trans in Ytrans]) #get just the numbers |
---|
2111 | TransP = np.array(TransP,dtype='float').T |
---|
2112 | TransX = np.array(TransX,dtype='float') |
---|
2113 | # GSASIIpath.IPyBreak() |
---|
2114 | pyx.pygettrans(Nlayers,TransP,TransX) |
---|
2115 | |
---|
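# Hedged check (added for illustration): DIFFaX requires each row of the layer-to-layer
# transition probability matrix to sum to 1 (see the sumPx test in StackSim above), so a
# quick sanity check on Layers['Transitions'] before calling pyx.pygettrans might look like:
def _checkTransitions(Layers):
    bad = []
    for iY,Ytrans in enumerate(Layers['Transitions']):
        sumPx = sum([round(trans[0],3) for trans in Ytrans])
        if sumPx != 1.0:
            bad.append((iY,sumPx))
    return bad      #an empty list means every row sums to 1.0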
2116 | def CalcStackingPWDR(Layers,scale,background,limits,inst,profile,debug): |
---|
2117 | # Scattering factors |
---|
2118 | SetStackingSF(Layers,debug) |
---|
2119 | # Controls & sequences |
---|
2120 | laueId,controls = SetStackingClay(Layers,'PWDR') |
---|
2121 | # cell & atoms |
---|
2122 | Nlayers = SetCellAtoms(Layers) |
---|
2123 | Volume = Layers['Cell'][7] |
---|
2124 | # Transitions |
---|
2125 | SetStackingTrans(Layers,Nlayers) |
---|
2126 | # PWDR scan |
---|
2127 | Nsteps = SetPWDRscan(inst,limits,profile) |
---|
2128 | # result as Spec |
---|
2129 | x0 = profile[0] |
---|
2130 | profile[3] = np.zeros(len(profile[0])) |
---|
2131 | profile[4] = np.zeros(len(profile[0])) |
---|
2132 | profile[5] = np.zeros(len(profile[0])) |
---|
2133 | iBeg = np.searchsorted(x0,limits[0]) |
---|
2134 | iFin = np.searchsorted(x0,limits[1]) |
---|
2135 | if iFin-iBeg > 20000: |
---|
2136 | iFin = iBeg+20000 |
---|
2137 | Nspec = 20001 |
---|
2138 | spec = np.zeros(Nspec,dtype='double') |
---|
2139 | time0 = time.time() |
---|
2140 | pyx.pygetspc(controls,Nspec,spec) |
---|
2141 | print ' GETSPC time = %.2fs'%(time.time()-time0) |
---|
2142 | time0 = time.time() |
---|
2143 | U = ateln2*inst['U'][1]/10000. |
---|
2144 | V = ateln2*inst['V'][1]/10000. |
---|
2145 | W = ateln2*inst['W'][1]/10000. |
---|
2146 | HWHM = U*nptand(x0[iBeg:iFin]/2.)**2+V*nptand(x0[iBeg:iFin]/2.)+W |
---|
2147 | HW = np.sqrt(np.mean(HWHM)) |
---|
2148 | BrdSpec = np.zeros(Nsteps) |
---|
2149 | if 'Mean' in Layers['selInst']: |
---|
2150 | pyx.pyprofile(U,V,W,HW,1,Nsteps,BrdSpec) |
---|
2151 | elif 'Gaussian' in Layers['selInst']: |
---|
2152 | pyx.pyprofile(U,V,W,HW,4,Nsteps,BrdSpec) |
---|
2153 | else: |
---|
2154 | BrdSpec = spec[:Nsteps] |
---|
2155 | BrdSpec /= Volume |
---|
2156 | iFin = iBeg+Nsteps |
---|
2157 | bakType,backDict,backVary = SetBackgroundParms(background) |
---|
2158 | backDict['Lam1'] = G2mth.getWave(inst) |
---|
2159 | profile[4][iBeg:iFin] = getBackground('',backDict,bakType,inst['Type'][0],profile[0][iBeg:iFin])[0] |
---|
2160 | profile[3][iBeg:iFin] = BrdSpec*scale+profile[4][iBeg:iFin] |
---|
2161 | if not np.any(profile[1]): #fill dummy data x,y,w,yc,yb,yd |
---|
2162 | rv = st.poisson(profile[3][iBeg:iFin]) |
---|
2163 | profile[1][iBeg:iFin] = rv.rvs() |
---|
2164 | Z = np.ones_like(profile[3][iBeg:iFin]) |
---|
2165 | Z[1::2] *= -1 |
---|
2166 | profile[1][iBeg:iFin] = profile[3][iBeg:iFin]+np.abs(profile[1][iBeg:iFin]-profile[3][iBeg:iFin])*Z |
---|
2167 | profile[2][iBeg:iFin] = np.where(profile[1][iBeg:iFin]>0.,1./profile[1][iBeg:iFin],1.0) |
---|
2168 | profile[5][iBeg:iFin] = profile[1][iBeg:iFin]-profile[3][iBeg:iFin] |
---|
2169 | print ' Broadening time = %.2fs'%(time.time()-time0) |
---|
2170 | |
---|
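# Hedged helper sketch (added): the single Gaussian width HW handed to pyx.pyprofile in
# CalcStackingPWDR above (and used in StackSim) is derived from the Caglioti-type U,V,W
# instrument coefficients (stored in centidegrees**2), scaled by 8*ln(2) and averaged over
# the 2-theta window tth (an array of 2-theta values in degrees):
def _meanGaussWidth(inst,tth):
    U = ateln2*inst['U'][1]/10000.
    V = ateln2*inst['V'][1]/10000.
    W = ateln2*inst['W'][1]/10000.
    HWHM = U*nptand(tth/2.)**2+V*nptand(tth/2.)+W
    return np.sqrt(np.mean(HWHM))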
2171 | def CalcStackingSADP(Layers,debug): |
---|
2172 | '''Compute the selected area diffraction pattern for the stacking fault model in Layers using pydiffax; the result is stored in Layers['Sadp']['Img']''' |
---|
2173 | # Scattering factors |
---|
2174 | SetStackingSF(Layers,debug) |
---|
2175 | # Controls & sequences |
---|
2176 | laueId,controls = SetStackingClay(Layers,'SADP') |
---|
2177 | # cell & atoms |
---|
2178 | Nlayers = SetCellAtoms(Layers) |
---|
2179 | # Transitions |
---|
2180 | SetStackingTrans(Layers,Nlayers) |
---|
2181 | # result as Sadp |
---|
2182 | mirror = laueId in [-1,2,3,7,8,9,10] |
---|
2183 | Nspec = 20001 |
---|
2184 | spec = np.zeros(Nspec,dtype='double') |
---|
2185 | time0 = time.time() |
---|
2186 | hkLim,Incr,Nblk = pyx.pygetsadp(controls,Nspec,spec) |
---|
2187 | Sapd = np.zeros((256,256)) |
---|
2188 | iB = 0 |
---|
2189 | for i in range(hkLim): |
---|
2190 | iF = iB+Nblk |
---|
2191 | p1 = 127+int(i*Incr) |
---|
2192 | p2 = 128-int(i*Incr) |
---|
2193 | if Nblk == 128: |
---|
2194 | if i: |
---|
2195 | Sapd[128:,p1] = spec[iB:iF] |
---|
2196 | Sapd[:128,p1] = spec[iF:iB:-1] |
---|
2197 | Sapd[128:,p2] = spec[iB:iF] |
---|
2198 | Sapd[:128,p2] = spec[iF:iB:-1] |
---|
2199 | else: |
---|
2200 | if i: |
---|
2201 | Sapd[:,p1] = spec[iB:iF] |
---|
2202 | Sapd[:,p2] = spec[iB:iF] |
---|
2203 | iB += Nblk |
---|
2204 | Layers['Sadp']['Img'] = Sapd |
---|
2205 | print ' GETSAD time = %.2fs'%(time.time()-time0) |
---|
2206 | # GSASIIpath.IPyBreak() |
---|
2207 | |
---|
2208 | #testing data |
---|
2209 | NeedTestData = True |
---|
2210 | def TestData(): |
---|
2211 | '''Set up the global background type, x-range and parameter dicts used by the test0/test1/test2/test3 routines below''' |
---|
2212 | # global NeedTestData |
---|
2213 | NeedTestData = False |
---|
2214 | global bakType |
---|
2215 | bakType = 'chebyschev' |
---|
2216 | global xdata |
---|
2217 | xdata = np.linspace(4.0,40.0,36000) |
---|
2218 | global parmDict0 |
---|
2219 | parmDict0 = { |
---|
2220 | 'pos0':5.6964,'int0':8835.8,'sig0':1.0,'gam0':1.0, |
---|
2221 | 'pos1':11.4074,'int1':3922.3,'sig1':1.0,'gam1':1.0, |
---|
2222 | 'pos2':20.6426,'int2':1573.7,'sig2':1.0,'gam2':1.0, |
---|
2223 | 'pos3':26.9568,'int3':925.1,'sig3':1.0,'gam3':1.0, |
---|
2224 | 'U':1.163,'V':-0.605,'W':0.093,'X':0.0,'Y':2.183,'SH/L':0.002, |
---|
2225 | 'Back0':5.384,'Back1':-0.015,'Back2':.004, |
---|
2226 | } |
---|
2227 | global parmDict1 |
---|
2228 | parmDict1 = { |
---|
2229 | 'pos0':13.4924,'int0':48697.6,'sig0':1.0,'gam0':1.0, |
---|
2230 | 'pos1':23.4360,'int1':43685.5,'sig1':1.0,'gam1':1.0, |
---|
2231 | 'pos2':27.1152,'int2':123712.6,'sig2':1.0,'gam2':1.0, |
---|
2232 | 'pos3':33.7196,'int3':65349.4,'sig3':1.0,'gam3':1.0, |
---|
2233 | 'pos4':36.1119,'int4':115829.8,'sig4':1.0,'gam4':1.0, |
---|
2234 | 'pos5':39.0122,'int5':6916.9,'sig5':1.0,'gam5':1.0, |
---|
2235 | 'U':22.75,'V':-17.596,'W':10.594,'X':1.577,'Y':5.778,'SH/L':0.002, |
---|
2236 | 'Back0':36.897,'Back1':-0.508,'Back2':.006, |
---|
2237 | 'Lam1':1.540500,'Lam2':1.544300,'I(L2)/I(L1)':0.5, |
---|
2238 | } |
---|
2239 | global parmDict2 |
---|
2240 | parmDict2 = { |
---|
2241 | 'pos0':5.7,'int0':1000.0,'sig0':0.5,'gam0':0.5, |
---|
2242 | 'U':2.,'V':-2.,'W':5.,'X':0.5,'Y':0.5,'SH/L':0.02, |
---|
2243 | 'Back0':5.,'Back1':-0.02,'Back2':.004, |
---|
2244 | # 'Lam1':1.540500,'Lam2':1.544300,'I(L2)/I(L1)':0.5, |
---|
2245 | } |
---|
2246 | global varyList |
---|
2247 | varyList = [] |
---|
2248 | |
---|
2249 | def test0(): |
---|
2250 | if NeedTestData: TestData() |
---|
2251 | msg = 'test ' |
---|
2252 | gplot = plotter.add('FCJ-Voigt, 11BM').gca() |
---|
2253 | gplot.plot(xdata,getBackground('',parmDict0,bakType,'PXC',xdata)[0]) |
---|
2254 | gplot.plot(xdata,getPeakProfile('PXC',parmDict0,xdata,varyList,bakType)) #getPeakProfile takes dataType first (see errPeakProfile above) |
---|
2255 | fplot = plotter.add('FCJ-Voigt, Ka1+2').gca() |
---|
2256 | fplot.plot(xdata,getBackground('',parmDict1,bakType,'PXC',xdata)[0]) |
---|
2257 | fplot.plot(xdata,getPeakProfile('PXC',parmDict1,xdata,varyList,bakType)) |
---|
2258 | |
---|
2259 | def test1(): |
---|
2260 | if NeedTestData: TestData() |
---|
2261 | time0 = time.time() |
---|
2262 | for i in range(100): |
---|
2263 | y = getPeakProfile('PXC',parmDict1,xdata,varyList,bakType) |
---|
2264 | print '100+6*Ka1-2 peaks=1200 peaks',time.time()-time0 |
---|
2265 | |
---|
2266 | def test2(name,delt): |
---|
2267 | if NeedTestData: TestData() |
---|
2268 | varyList = [name,] |
---|
2269 | xdata = np.linspace(5.6,5.8,400) |
---|
2270 | hplot = plotter.add('derivatives test for '+name).gca() |
---|
2271 | hplot.plot(xdata,getPeakProfileDerv('PXC',parmDict2,xdata,varyList,bakType)[0]) |
---|
2272 | y0 = getPeakProfile('PXC',parmDict2,xdata,varyList,bakType) |
---|
2273 | parmDict2[name] += delt |
---|
2274 | y1 = getPeakProfile('PXC',parmDict2,xdata,varyList,bakType) |
---|
2275 | hplot.plot(xdata,(y1-y0)/delt,'r+') |
---|
2276 | |
---|
2277 | def test3(name,delt): |
---|
2278 | if NeedTestData: TestData() |
---|
2279 | names = ['pos','sig','gam','shl'] |
---|
2280 | idx = names.index(name) |
---|
2281 | myDict = {'pos':parmDict2['pos0'],'sig':parmDict2['sig0'],'gam':parmDict2['gam0'],'shl':parmDict2['SH/L']} |
---|
2282 | xdata = np.linspace(5.6,5.8,800) |
---|
2283 | dx = xdata[1]-xdata[0] |
---|
2284 | hplot = plotter.add('derivatives test for '+name).gca() |
---|
2285 | hplot.plot(xdata,100.*dx*getdFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata)[idx+1]) |
---|
2286 | y0 = getFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata) |
---|
2287 | myDict[name] += delt |
---|
2288 | y1 = getFCJVoigt3(myDict['pos'],myDict['sig'],myDict['gam'],myDict['shl'],xdata) |
---|
2289 | hplot.plot(xdata,(y1-y0)/delt,'r+') |
---|
2290 | |
---|
2291 | if __name__ == '__main__': |
---|
2292 | import GSASIItestplot as plot |
---|
2293 | global plotter |
---|
2294 | plotter = plot.PlotNotebook() |
---|
2295 | # test0() |
---|
2296 | # for name in ['int0','pos0','sig0','gam0','U','V','W','X','Y','SH/L','I(L2)/I(L1)']: |
---|
2297 | for name,shft in [['int0',0.1],['pos0',0.0001],['sig0',0.01],['gam0',0.00001], |
---|
2298 | ['U',0.1],['V',0.01],['W',0.01],['X',0.0001],['Y',0.0001],['SH/L',0.00005]]: |
---|
2299 | test2(name,shft) |
---|
2300 | for name,shft in [['pos',0.0001],['sig',0.01],['gam',0.0001],['shl',0.00005]]: |
---|
2301 | test3(name,shft) |
---|
2302 | print "OK" |
---|
2303 | plotter.StartEventLoop() |
---|