"""
Our input is a dump of a normalised SQL schema, consisting of (at least)
plate (basic plate metadata), exposures (of which there may be multiple on one
plate; this is where the astrometry is), scan (containing stuff like pixel
size), and archives (which we use to provide a guess at collections).

We're going for obscore exclusively here, and the data set is small enough,
so we do the entire reconstruction in python.
"""

import os
import re

from gavo import base
from gavo.grammars.customgrammar import CustomRowIterator


class RowIterator(CustomRowIterator):
	"""A row iterator handing out the pre-computed row dicts stored in
	the grammar's dataPack attribute (filled by makeDataPack).
	"""
	# NOTE: re-indented with tabs to match the rest of the file; the
	# original class used spaces, unlike every other block here.
	def _iterRows(self):
		return iter(self.grammar.dataPack)


def op_NULL(a, b, op):
	"""Return op(a, b), treating a None operand as absent.

	If either operand is None, the other one is returned unchanged
	(so both None yields None); only when both are non-None is op
	actually applied.
	"""
	if a is None or b is None:
		return b if a is None else a
	return op(a, b)


def floatsum(*args):
	"""Return the sum of all arguments, each coerced to float.

	With no arguments, this is 0 (the default of the builtin sum).
	"""
	total = 0
	for item in args:
		total = total+float(item)
	return total


def loadTableFromDump(basePath, dumpName, tableName, keyPrefix=""):
	"""Return a list of row dicts for table tableName found in the
	postgres dump basePath/dumpName.

	Column names are taken from the COPY statement introducing the
	table's data; keyPrefix is prepended to each of them (which lets
	callers avoid name clashes when several tables are merged later).

	All values are kept as strings; postgres NULLs (``\\N``) become
	None.

	Raises base.ReportableError if no COPY statement for tableName is
	found in the dump.
	"""
	signature = "COPY %s ("%tableName
	result = []

	with open(os.path.join(basePath, dumpName), encoding="utf-8") as f:
		# skip ahead to the COPY statement for our table
		for ln in f:
			if ln.startswith(signature):
				break
		else:
			raise base.ReportableError("Signature %s not found."%signature)

		labels = [keyPrefix+s.strip()
			for s in re.search(r"\((.*)\)", ln).group(1).split(",")]

		# data lines follow, terminated by a \. line
		for ln in f:
			if ln=="\\.\n":
				break
			# Strip the line's trailing newline before splitting so the
			# last column does not carry a "\n" (the original code kept
			# it, and its startswith("\\N") NULL test consequently also
			# mis-detected non-NULL values merely beginning with \N).
			values = [None if s=="\\N" else s
				for s in ln.rstrip("\n").split("\t")]
			result.append(dict(zip(labels, values)))

	return result


def makeDataPack(grammar):
	"""Return a list of row dicts joining plate, archive, exposure,
	solution, preview, and scan metadata from the SQL dumps in the
	resource's data directory.

	grammar is the customGrammar this serves as data_function for; we
	only use it to locate the resource directory (grammar.rd.resdir).

	Plates for which any of the joined tables has no matching row are
	silently dropped.
	"""
	basePath = os.path.join(grammar.rd.resdir, "data")

	def loadIndexed(dumpName, tableName, keyName, keyPrefix=""):
		# index a dump table by its id column for fast joining below
		return dict((r[keyName], r)
			for r in loadTableFromDump(
				basePath, dumpName, tableName, keyPrefix))

	exposures = loadIndexed("applause_dr3.exposure.dump",
		"applause_dr3.exposure", "plate_id")
	plates = loadIndexed("applause_dr3.plate.dump",
		"applause_dr3.plate", "plate_id")
	archives = loadIndexed("applause_dr3.archive.dump",
		"applause_dr3.archive", "archive_id")
	solutions = loadIndexed("applause_dr3.solution.dump",
		"applause_dr3.solution", "plate_id")
	# preview columns get a prev_ prefix to avoid clobbering plate columns
	previews = loadIndexed("applause_dr3.preview.dump",
		"applause_dr3.preview", "prev_plate_id", "prev_")
	scans = loadIndexed("applause_dr3.scan.dump",
		"applause_dr3.scan", "plate_id")

	records = []
	for pid, plate in plates.items():
		try:
			rec = plate.copy()
			# NOTE(review): the original additionally stored the whole
			# archive row dict under rec["archive_name"]; that put a dict
			# into a string-valued field and was immediately overwritten
			# by the update below whenever the archive dump has an
			# archive_name column, so it has been dropped here.
			rec.update(archives[rec["archive_id"]])
			rec.update(exposures[pid])
			rec.update(solutions[pid])
			rec.update(previews[pid])
			rec.update(scans[pid])
			records.append(rec)
		except KeyError:
			# ignore incomplete records for now (if you want to un-ignore them,
			# note that the test set currently has lots of incomplete records).
			pass
	return records


if __name__=="__main__":
	# Smoke test: build the data pack for the applause RD and print the
	# first row the iterator would hand to the row maker.
	from gavo import api
	from gavo.grammars.customgrammar import CustomGrammar

	grammar = CustomGrammar(api.getRD("applause/q"))
	grammar.dataPack = makeDataPack(grammar)
	rowIterator = RowIterator(grammar, None)
	for record in rowIterator:
		print(record)
		break
