"""
A grammar to read the data cubes from CALIFA data release 3.

These have an incredibly verbose header, plus six extensions.  See Paper,
Table 1, for details.

This is a dispatched grammar; it *could* read the actual position/flux data
as well, but that's now done using res/booster, so it's suppressed here.

We check badpix; if all pixels of a given spectrum are flagged 1, the
spectrum is considered "empty"; that is especially pertinent for the
spaxels outside the data hexagon.
"""

from gavo import base
from gavo import rscdef
from gavo.base import coords
from gavo.utils import pyfits
from gavo.grammars.customgrammar import CustomRowIterator

class RowIterator(CustomRowIterator):
	"""A custom row iterator yielding per-spectrum metadata from a CALIFA
	DR3 data cube.

	For each spatial pixel whose spectrum is not entirely flagged in the
	badpix extension, a metadata dictionary (the primary header cards plus
	obsId, setup, pixel indices, position, and a computed SNR) is yielded
	twice: once targeted at the spectra table, once at the products table.
	The flux data itself is imported by a booster (see module docstring).
	"""
	# (raInd, decInd) of the spectrum currently being processed; read by
	# getLocator when reporting errors.
	indices = 0, 0

	# setup name -> (wavelength the SNR is computed at (presumably
	# Angstrom -- TODO confirm against the data model), offset added to
	# the pixel indices to keep x/yindex unique across setups).
	setupParams = {
		"V500": (5000, 0),
		"V1200": (4000, 1000),
		"COMB": (4000, 2000),
	}

	def _getWCSMapper(self, refPix, refVal, cDelt):
		"""Returns a function mapping a pixel coordinate to a physical
		value on a linear, 1D WCS axis (val = (pix-refPix)*cDelt+refVal).
		"""
		def mapper(val):
			return (val-refPix)*cDelt+refVal
		return mapper

	def _getWavelengthMapper(self, primaryHeader):
		"""Returns a function mapping a (1-based, FITS-style) pixel index
		on axis 3 to a wavelength, based on primaryHeader's WCS cards.
		"""
		return self._getWCSMapper(
			primaryHeader["CRPIX3"], primaryHeader["CRVAL3"],
			primaryHeader["CDELT3"])

	def _getSetup(self):
		"""Returns the grating setup (V1200, V500, or COMB) guessed from
		the source file name, or "?SETUP?" if nothing matches.
		"""
		for setup in ("V1200", "V500", "COMB"):
			if setup in self.sourceToken:
				return setup
		return "?SETUP?"

	def _getBaseMeta(self, primary):
		"""Returns a dict of all readable cards in primary's header.

		Unreadable cards are reported through the UI and skipped.
		"""
		baseMeta = {}
		for card in primary.header.cards:
			try:
				baseMeta[card.keyword] = card.value
			except ValueError as msg:
				base.ui.notifyError(
					"Bad card in %s: %s"%(self.sourceToken, str(msg)))
		return baseMeta

	def _iterRows(self):
		# open/close here so the HDU list is released even on the
		# bad-source early return or when the consumer stops early.
		hdus = pyfits.open(self.sourceToken)
		try:
			for row in self._iterRowsFromHDUs(hdus):
				yield row
		finally:
			hdus.close()

	def _iterRowsFromHDUs(self, hdus):
		"""Yields the metadata rows for the open HDU list hdus (see the
		class docstring for what is in them).
		"""
		setup = self._getSetup()

		# some files have SDSS flats, others don't
		if len(hdus)==5:
			primary, err, errweight, badpix, fiberCov = hdus
		elif len(hdus)==1:
			# that's crap that's sneaked into the data directory.
			base.ui.notifyError("Bad Califa DR3 source: %s"%self.sourceToken)
			return
		else:
			primary, err, errweight, badpix, fiberCov, flat = hdus
		posMapper = coords.getWCS(primary.header, naxis=(1,2)).all_pix2world

		baseMeta = self._getBaseMeta(primary)

		# the wavelength on which the SNR is computed and the pixel index
		# offset depend on the setup; fail fast on unknown setups rather
		# than once per spectrum.
		try:
			snrWL, pixelOffset = self.setupParams[setup]
		except KeyError:
			raise base.SourceParseError("Not one of V500, V1200, or COMB?",
				source=self.sourceToken)

		# the GAVO-internal obsid is (base accref)-(x-index)-(y-index);
		# (base accref) is the standard accref minus the extension
		# (".rscube.fits", 12 characters).
		idPattern = "%s-%%02d-%%02d"%(rscdef.getInputsRelativePath(
			self.sourceToken)[:-12])

		# The array axes are (3/Spectrum, 2/Dec, 1/RA)
		specPoints, decPoints, raPoints = primary.data.shape

		# spectral index of the SNR wavelength; loop-invariant, so
		# computed once.
		# NOTE(review): this is the 1-based FITS pixel; using it as a
		# 0-based numpy index looks off by one -- confirm intended.
		snrInd = int((snrWL-baseMeta["CRVAL3"])/baseMeta["CDELT3"]
			+baseMeta["CRPIX3"])

		# now, for each spectrum, make up an obsid and return the primary
		# keys plus the obsid to the metadata table, and then the spectrum
		# to the fluxes table (the fluxes now actually come from a
		# booster; see module docstring).
		for raInd in range(raPoints):
			for decInd in range(decPoints):
				self.indices = raInd, decInd

				if min(badpix.data[:, decInd, raInd])==1:
					# all pixels in this spectrum are bad, it's a filler, skip it
					continue

				curId = idPattern%(raInd, decInd)
				thisMeta = baseMeta.copy()
				thisMeta["obsId"] = curId
				thisMeta["setup"] = setup
				thisMeta["raInd"] = raInd
				thisMeta["decInd"] = decInd
				physPos = posMapper((raInd,), (decInd,), 0)
				thisMeta["RA"] = float(physPos[0][0])
				thisMeta["Dec"] = float(physPos[1][0])
				thisMeta["computedSNR"] = (primary.data[snrInd, decInd, raInd]
					/err.data[snrInd, decInd, raInd])
				thisMeta["xindex"] = raInd+pixelOffset
				thisMeta["yindex"] = decInd+pixelOffset

				yield ("spectra", thisMeta)
				yield ("products", thisMeta)

	def getLocator(self):
		"""Returns a human-readable location of the current spectrum for
		error messages.
		"""
		return "ra/dec %s/%s"%self.indices


if __name__=="__main__":
	# ad-hoc smoke test: run the iterator over a local sample cube and
	# dump the metadata rows, leaving out the bulky PPAK*/PIPE* cards.
	from gavo.user import logui
	from gavo import api
	logui.LoggingUI(api.ui)

	class StandinGrammar:
		# just enough of a grammar for the row iterator's constructor
		rd = api.getRD("califa/q")
	grammar = StandinGrammar()

	rowIter = RowIterator(grammar, "data/UGC10693.V1200.rscube.fits", None)
	for destTable, row in rowIter._iterRows():
		print({key: value for key, value in row.items()
			if not key.startswith(("PPAK", "PIPE"))})
