#!/usr/bin/python
# -*- coding: utf-8 -*-

import os
import re
import unicodedata
import sys
import csv
import string

from os import listdir, rename
from os.path import isfile, join

## directory from which the script is launched (not used elsewhere in this file)
mypath = os.getcwd()
## path of the input papers CSV file, taken from the first CLI argument
mypath_input_papers = sys.argv[1]

def prepareListPapers(path=None):
  """Parse the '#'-delimited papers CSV file into a dict keyed by paper name.

  The first row is treated as a header and is only echoed as a sanity check.
  Every other row must have at least 6 columns:
  id # name # comment # first_date # last_date # authors

  Args:
    path: CSV file to read.  Defaults to the command-line path captured in
      the module-level ``mypath_input_papers`` (sys.argv[1]).

  Returns:
    dict mapping the space-trimmed paper name to a dict with keys
    "id", "name", "comment", "first_date", "last_date", "authors".
    NOTE: rows sharing the same name silently overwrite each other
    (last row wins), as in the original implementation.
  """
  if path is None:
    path = mypath_input_papers
  papers_map = {}

  ##############################
  # PARSING of the CSV file    #
  ##############################
  with open(path) as csvfile:
    filereader = csv.reader(csvfile, delimiter='#')
    for i, row in enumerate(filereader):
      if i == 0:
        # Header row: echo the first cell only (two spaces match the
        # original Python-2 `print a, b` output exactly).
        print("First line (and first column) :  " + row[0])
        continue
      name_paper = row[1].strip(" ")
      # Remove ALL whitespace from the id.  "".join(split()) replaces the
      # Python-2-only idiom str.translate(None, string.whitespace).
      id_paper = "".join(row[0].split())
      papers_map[name_paper] = {"id": id_paper,
                                "name": name_paper,
                                "comment": row[2],
                                "first_date": row[3],
                                "last_date": row[4],
                                "authors": row[5]}
      print("PAPER : [ " + id_paper + " ]")
  return papers_map

def findDuplicateNames(papers, paper_key):
  """Report papers whose trimmed name strictly contains another paper's name.

  For the paper at ``paper_key``, compares its "core" name against every
  other paper's core name; a strict-substring match (contained but not
  equal) is printed as ``# id # name # other_id # other_name`` and
  collected in the returned list.

  Args:
    papers: dict of paper-name -> paper record as built by prepareListPapers.
    paper_key: key of the paper to check against all others.

  Returns:
    list of (id, core_name, other_id, other_core_name) tuples, one per
    match, in dict iteration order.  (The original returned None; callers
    that ignore the return value are unaffected.)
  """
  def _core_name(raw):
    # Trim surrounding spaces, then drop the first and last character —
    # the names apparently carry one wrapper character on each side.
    trimmed = raw.strip(" ")
    return trimmed[1:-1]

  paper = papers[paper_key]
  name_core = _core_name(paper["name"])
  id_paper = paper["id"]
  matches = []
  for other in papers.values():
    other_core = _core_name(other["name"])
    # Strict substring: contained in this paper's name but not identical.
    if other_core in name_core and other_core != name_core:
      print("# " + id_paper + " # " + name_core
            + " # " + other["id"] + " # " + other_core)
      matches.append((id_paper, name_core, other["id"], other_core))
  return matches

# Build the name -> paper map, then check every paper against all others
# for strict name containment.
papers = prepareListPapers()
for paper_name in papers:
    findDuplicateNames(papers, paper_name)

