package eu.dnetlib.functionality.index.parse;

import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.StringTokenizer;

import org.apache.lucene.queryParser.QueryParser;

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Maps;

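/**
 * Leaf node of the query parse tree: holds a (field, relation, value) triple
 * and serializes it to Lucene query syntax, honouring field aliases, per-field
 * boost weights and parser options.
 * <p>
 * E.g. {@code new TermNode("title", Relation.EXACT, "open access").toLucene()}
 * produces {@code title:"open access"}.
 */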
public class TermNode extends Node {

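	/** Name of the catch-all field targeted when a query does not address a specific field. */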
	public static final String dnetDefaultField = "__all";
	
	private String field;
	private Relation rel;
	private String value;
	private Map<String, List<String>> options = Maps.newHashMap();
	private BiMap<String, String> aliases = HashBiMap.create();
	private Map<String, String> weights = Maps.newHashMap();
	
	public TermNode(String field, Relation rel, String value) {
		this.field = field;
		this.rel = rel;
		this.value = value;
	}
	
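	/**
	 * @param options parser options, e.g. the "wildcard" flag checked in {@link #checkEscaping(String)}
	 * @param aliases mapping from query field names to index field names
	 * @param weights per-field boost factors, applied as Lucene {@code ^w} suffixes
	 */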
	public TermNode(String field, Relation rel, String value, Map<String, List<String>> options, BiMap<String, String> aliases, Map<String, String> weights) {
		this(field, rel, value);
		this.options = options;
		this.aliases = aliases;
		this.weights = weights;
	}	

	@Override
	public String toString() {
		return TermNode.class.getSimpleName() + "(" + field + " " + rel + " " + value + ")";
	}

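	/**
	 * Serializes this node to Lucene query syntax, according to the term's relation:
	 * a phrase query for EXACT, mandatory ("+") clauses for ALL, optional clauses for
	 * EQUAL/ANY, a negated phrase for NOT, exclusive/inclusive range queries for
	 * LT/GT/LTE/GTE, and a closed date range for WITHIN.
	 */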
	@Override
	public String toLucene() {
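		// the value is tokenized twice because a StringTokenizer can only be consumed
		// once: the first pass builds the per-field clauses, the second one feeds the
		// cross-field boost expansion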
		StringTokenizer termTokenizer = new StringTokenizer(value, " ");
		StringTokenizer weightTokenizer = new StringTokenizer(value, " ");
		
		switch (rel) {
			case EXACT:
				return getFieldName() + ":" + "\"" + value + "\"" + weight() + " " + expand(value);
			case ALL:
				return "(" + handleTokens(termTokenizer, "+") + " " + expandTokens(weightTokenizer) + ")";
			case EQUAL: // intentional fall-through: EQUAL is handled as ANY
			case ANY:
				return "(" + handleTokens(termTokenizer, "") + " " + expandTokens(weightTokenizer) + ")";
			case NOT:
				return "-" + field + ":" + "\"" + value + "\"";
			case LT:
				return field + ":" + "{* TO " + value + "}" + weight();
			case GT:
				return field + ":" + "{" + value + " TO *}" + weight();
			case LTE:
				return field + ":" + "[* TO " + value + "]" + weight();
			case GTE:
				return field + ":" + "[" + value + " TO *]" + weight();
			case WITHIN:
				String[] bounds = value.split(" ");
				return field + ":[" + checkDate(bounds[0]) + " TO " + checkDate(bounds[1]) + "]" + weight();
			default:
				throw new RuntimeException("unable to serialize: " + toString());
		}
	}

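	/** Resolves the field name through the alias map, falling back to the field itself. */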
	private String getFieldName() {
		String alias = aliases.get(field);
		return alias != null ? alias : field;
	}

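	/** Returns the Lucene boost suffix ("^" + weight) for the field, or an empty string if no weight is configured. */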
	private String weight() {
		String w = weights.get(field);
		return w != null ? "^" + w : "";  
	}
	
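	/**
	 * For queries on the catch-all field, expands each token of the value into
	 * boosted clauses on every weighted field (see {@link #expand(String)}).
	 */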
	private String expandTokens(StringTokenizer tokenizer) {
		StringBuilder ret = new StringBuilder();
		while (tokenizer.hasMoreTokens()) {
			String token = tokenizer.nextToken();

			if (field.equals(dnetDefaultField)) {
				ret.append(expand(token));
			}
		}
		return ret.toString().trim();
	}
	
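	/**
	 * Builds a boosted phrase clause ({@code field:"token"^weight}) on each weighted
	 * field, unless the current field already carries a weight of its own.
	 */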
	private String expand(String token) {
		StringBuilder ret = new StringBuilder();
		if (!weights.containsKey(field)) {
			for (Entry<String, String> e : weights.entrySet()) {
				ret.append(e.getKey()).append(":\"").append(checkEscaping(token)).append("\"^").append(e.getValue()).append(" ");
			}
		}
		return ret.toString();
	}

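	/** Emits one {@code field:token} clause per token, prefixed with the given operator ("+" for mandatory, "" for optional). */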
	private String handleTokens(StringTokenizer tokenizer, String op) {
		StringBuilder ret = new StringBuilder();
		while (tokenizer.hasMoreTokens()) {
			String token = tokenizer.nextToken();
			ret.append(op).append(field).append(":").append(checkEscaping(token)).append(weight()).append(" ");
		}
		return ret.toString().trim();
	}
	
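	/**
	 * Escapes Lucene special characters in the token, unless it is a wildcard term
	 * and wildcards are enabled via the "wildcard" option (a bare "*" always passes through).
	 */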
	private String checkEscaping(String token) {
		boolean isWildcard = token.contains("*") || token.contains("?");
		List<String> wildcardOption = options.get("wildcard");
		boolean isWildcardEnabled = (wildcardOption != null && wildcardOption.contains("true")) || token.equals("*");

		if (!(isWildcard && isWildcardEnabled)) {
			token = QueryParser.escape(token);
		}
		return token;
	}

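	/** Normalizes a plain date by appending "T00:00:00Z", so range bounds match the full ISO-8601 form expected by the index. */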
	private String checkDate(final String date) {
		if (!date.endsWith("Z")) {
			return date + "T00:00:00Z";
		}
		return date;
	}
	
	public String getField() {
		return field;
	}

	public Relation getRel() {
		return rel;
	}

	public String getValue() {
		return value;
	}	

}
