Initial commit
117
data/server_list/GUIDtxtT0Execl.py
Normal file
@@ -0,0 +1,117 @@
from __future__ import annotations

import os
from pathlib import Path
from collections import OrderedDict

import pandas as pd


# ------------------------------------------------------------
# Cross-platform root resolver (Windows / Linux / macOS)
# ------------------------------------------------------------
def resolve_data_root() -> Path:
    """
    Priority:
    1) env var IDRAC_DATA_DIR (absolute or relative)
    2) nearest parent of this file that contains a 'data' folder
    3) ./data under the current working directory
    """
    env = os.getenv("IDRAC_DATA_DIR")
    if env:
        return Path(env).expanduser().resolve()

    here = Path(__file__).resolve()
    for p in [here] + list(here.parents):
        if (p / "data").is_dir():
            return (p / "data").resolve()

    return (Path.cwd() / "data").resolve()


DATA_ROOT = resolve_data_root()

# ------------------------------------------------------------
# Paths (can be overridden with env vars if needed)
# ------------------------------------------------------------
SERVER_LIST_DIR = Path(os.getenv("GUID_SERVER_LIST_DIR", DATA_ROOT / "server_list"))
SERVER_LIST_FILE = Path(os.getenv("GUID_LIST_FILE", SERVER_LIST_DIR / "guid_list.txt"))

GUID_TXT_DIR = Path(os.getenv("GUID_TXT_DIR", DATA_ROOT / "guid_file"))

OUTPUT_XLSX = Path(
    os.getenv("GUID_OUTPUT_XLSX", DATA_ROOT / "idrac_info" / "XE9680_GUID.xlsx")
)

# Make sure the output directory exists
OUTPUT_XLSX.parent.mkdir(parents=True, exist_ok=True)

# ------------------------------------------------------------
# Utilities
# ------------------------------------------------------------
def read_lines_any_encoding(path: Path) -> list[str]:
    """Read a text file, trying common encodings (utf-8-sig/utf-8/cp949/euc-kr/latin-1)."""
    encodings = ["utf-8-sig", "utf-8", "cp949", "euc-kr", "latin-1"]
    for enc in encodings:
        try:
            with path.open("r", encoding=enc, errors="strict") as f:
                return f.read().splitlines()
        except Exception:
            continue
    # Last resort: decode as UTF-8 with replacement characters
    with path.open("r", encoding="utf-8", errors="replace") as f:
        return f.read().splitlines()


def parse_txt_with_st(file_path: Path) -> dict:
    """
    Parse a GUID .txt file:
    - the first line becomes 'S/T'
    - remaining lines are in 'Key: Value' form
    Insertion order is preserved.
    """
    lines = read_lines_any_encoding(file_path)
    if not lines:
        return {}

    data = OrderedDict()
    data["S/T"] = lines[0].strip()

    for raw in lines[1:]:
        line = raw.strip()
        if not line or ":" not in line:
            continue
        key, value = line.split(":", 1)
        data[key.strip()] = value.strip()

    return dict(data)


# ------------------------------------------------------------
# Load the list of file basenames from guid_list.txt
# ------------------------------------------------------------
if not SERVER_LIST_FILE.is_file():
    raise FileNotFoundError(f"guid_list.txt not found: {SERVER_LIST_FILE}")

file_names = [x.strip() for x in read_lines_any_encoding(SERVER_LIST_FILE) if x.strip()]

# ------------------------------------------------------------
# Collect rows
# ------------------------------------------------------------
rows: list[dict] = []
for name in file_names:
    txt_path = GUID_TXT_DIR / f"{name}.txt"
    if not txt_path.is_file():
        print(f"[WARN] File not found: {txt_path.name}")
        # Uncomment to append a placeholder row containing only the S/T value:
        # rows.append({"S/T": name})
        continue

    rows.append(parse_txt_with_st(txt_path))

# Build the DataFrame (union of keys across all rows)
df = pd.DataFrame(rows)

# Prepend a No column (1..N)
df.insert(0, "No", range(1, len(df) + 1))

# Save to Excel
df.to_excel(OUTPUT_XLSX, index=False)

print(f"Excel file created: {OUTPUT_XLSX}")
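For reference, a minimal sketch of the input format parse_txt_with_st expects and the row it produces. The field names and GUID value below are hypothetical examples, not taken from a real iDRAC export.

from pathlib import Path

# Hypothetical GUID .txt file: first line is the service tag, the rest are "Key: Value" pairs.
sample = Path("demo_guid.txt")
sample.write_text(
    "1XZCZC4\n"                                          # first line -> 'S/T'
    "GUID: 4C4C4544-0000-0000-0000-000000000000\n"       # made-up value
    "Model: PowerEdge XE9680\n"
)

# With parse_txt_with_st from the script above in scope:
# parse_txt_with_st(sample)
# -> {'S/T': '1XZCZC4',
#     'GUID': '4C4C4544-0000-0000-0000-000000000000',
#     'Model': 'PowerEdge XE9680'}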
109
data/server_list/excel.py
Normal file
@@ -0,0 +1,109 @@
from __future__ import annotations

import os
from pathlib import Path

import pandas as pd


# ---------------------------------------------
# Cross-platform paths (Windows & Linux/Mac)
# ---------------------------------------------
def resolve_data_root() -> Path:
    """
    Priority:
    1) env var IDRAC_DATA_DIR (absolute or relative)
    2) nearest parent of this file that contains a 'data' folder
    3) ./data under the current working directory
    """
    env = os.getenv("IDRAC_DATA_DIR")
    if env:
        return Path(env).expanduser().resolve()

    here = Path(__file__).resolve()
    for p in [here] + list(here.parents):
        if (p / "data").is_dir():
            return (p / "data").resolve()

    return (Path.cwd() / "data").resolve()


DATA_ROOT = resolve_data_root()

SERVER_LIST_DIR = DATA_ROOT / "server_list"
SERVER_LIST_FILE = SERVER_LIST_DIR / "server_list.txt"

MAC_TXT_DIR = DATA_ROOT / "mac"
OUTPUT_XLSX = DATA_ROOT / "idrac_info" / "mac_info.xlsx"

# Ensure the output directory exists
OUTPUT_XLSX.parent.mkdir(parents=True, exist_ok=True)

# ---------------------------------------------
# Helpers
# ---------------------------------------------
def read_lines_any_encoding(path: Path) -> list[str]:
    """Read a text file, trying common encodings (handles Windows and UTF-8)."""
    encodings = ["utf-8-sig", "utf-8", "cp949", "euc-kr", "latin-1"]
    for enc in encodings:
        try:
            with path.open("r", encoding=enc, errors="strict") as f:
                return f.read().splitlines()
        except Exception:
            continue
    # Last resort: decode as UTF-8 with replacement characters
    with path.open("r", encoding="utf-8", errors="replace") as f:
        return f.read().splitlines()


# ---------------------------------------------
# Load the server list (file names without .txt)
# ---------------------------------------------
if not SERVER_LIST_FILE.is_file():
    raise FileNotFoundError(f"server_list.txt not found: {SERVER_LIST_FILE}")

file_names = read_lines_any_encoding(SERVER_LIST_FILE)

data_list: list[str] = []
index_list: list[int | str] = []

sequence_number = 1

for name in file_names:
    # Normalize and skip blanks
    base = (name or "").strip()
    if not base:
        continue

    txt_path = MAC_TXT_DIR / f"{base}.txt"
    if not txt_path.is_file():
        # If a file is missing, keep the rows aligned with an empty line
        data_list.append("")
        index_list.append("")
        continue

    lines = read_lines_any_encoding(txt_path)
    for line in lines:
        cleaned = (line or "").strip().upper()
        if cleaned:
            data_list.append(cleaned)
            if len(cleaned) == 7:
                index_list.append(sequence_number)
                sequence_number += 1
            else:
                index_list.append("")  # leave the index blank if not 7 chars
        else:
            data_list.append("")
            index_list.append("")

print(f"Length of index_list: {len(index_list)}")
print(f"Length of data_list: {len(data_list)}")

# ---------------------------------------------
# Save to Excel
# ---------------------------------------------
df = pd.DataFrame({
    "Index": index_list,   # becomes column A
    "Content": data_list,  # becomes column B
})

# header=False writes the data without a header row; index=False omits pandas row numbers
df.to_excel(OUTPUT_XLSX, index=False, header=False)

print(f"Saved: {OUTPUT_XLSX}")
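A minimal sketch of the indexing rule used above, assuming each file under data/mac/ starts with the 7-character service tag followed by its MAC addresses (the sample values are made up):

# Hypothetical contents of one file under data/mac/, e.g. data/mac/4XZCZC4.txt:
sample_lines = ["4XZCZC4", "AA:BB:CC:DD:EE:01", "AA:BB:CC:DD:EE:02"]

# Applying the same rule as the loop above:
seq, rows = 1, []
for line in sample_lines:
    cleaned = line.strip().upper()
    if len(cleaned) == 7:       # service tags are 7 characters -> numbered
        rows.append((seq, cleaned))
        seq += 1
    else:                       # anything else (e.g. a MAC address) -> Index left blank
        rows.append(("", cleaned))

print(rows)  # [(1, '4XZCZC4'), ('', 'AA:BB:CC:DD:EE:01'), ('', 'AA:BB:CC:DD:EE:02')]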
49
data/server_list/excel.py.back
Normal file
@@ -0,0 +1,49 @@
import os
import pandas as pd

# Folder containing server_list.txt
server_list_folder = '/app/idrac_info/server_list/'  # adjust to the actual path
server_list_file = os.path.join(server_list_folder, 'server_list.txt')

# Folder containing the .txt files to extract from
data_files_folder = '/app/idrac_info/mac/'  # adjust to the actual path

# Read the file names from server_list.txt
with open(server_list_file, 'r') as f:
    file_names = f.read().splitlines()

# Lists to hold the contents of each file
data_list = []
index_list = []

# Read each file and append its lines to the lists
sequence_number = 1
for file_name in file_names:
    file_path = os.path.join(data_files_folder, f'{file_name}.txt')
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            lines = file.readlines()
            for line in lines:
                cleaned_line = line.strip().upper()  # convert to uppercase
                if cleaned_line:  # skip blank lines
                    data_list.append(cleaned_line)
                    if len(cleaned_line) == 7:
                        index_list.append(sequence_number)
                        sequence_number += 1
                    else:
                        index_list.append('')  # keep blank when not 7 chars
                else:
                    index_list.append('')  # note: blank lines only pad index_list here; excel.py also pads data_list
    else:
        data_list.append('')
        index_list.append('')  # keep blanks when the file is missing

# Convert the data into a DataFrame (Index in column A, Content in column B)
df = pd.DataFrame({
    'Index': index_list,
    'Content': data_list
})

# Save to an Excel file
output_file = '/app/idrac_info/idrac_info/mac_info.xlsx'
df.to_excel(output_file, index=False, header=False)
3
data/server_list/guid_list.txt
Normal file
@@ -0,0 +1,3 @@
1XZCZC4

2NYCZC4
60
data/server_list/list.txt
Normal file
@@ -0,0 +1,60 @@
DKK3674
GFF3674
HGK3674
JFF3674
2HF3674
4MK3674
BJF3674
6KK3674
2HK3674
FKK3674
CGF3674
6KF3674
4GF3674
FJK3674
1LK3674
8GF3674
FJF3674
7HF3674
5GF3674
6JF3674
8LK3674
FDF3674
8HK3674
FHK3674
5LK3674
HHK3674
7FF3674
CKK3674
3JF3674
2GF3674
3HF3674
GGK3674
6HK3674
CJK3674
3JK3674
8JK3674
FGF3674
5HF3674
4JF3674
5CF3674
282S574
HHF3674
DCF3674
4FF3674
2KF3674
HCF3674
8KK3674
DHK3674
HDF3674
GCF3674
5MK3674
5FF3674
DMK3674
4KF3674
BKK3674
CLK3674
6LK3674
2MK3674
4HK3674
BLK3674
BIN
data/server_list/mac_info.XLSM
Normal file
Binary file not shown.
71
data/server_list/server_info_zip.py
Normal file
@@ -0,0 +1,71 @@
import os
import zipfile


# Read the file names from list.txt
def read_file_list():
    list_file = os.path.join(os.getcwd(), 'list.txt')
    if not os.path.isfile(list_file):
        raise ValueError(f"'{list_file}' is not a file or does not exist.")

    try:
        with open(list_file, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f.readlines() if line.strip()]
    except FileNotFoundError:
        print(f"'{list_file}' does not exist.")
        return []


# Look up the listed files in a folder and add them to a zip archive
def zip_selected_files(folder_path, file_list, output_zip):
    with zipfile.ZipFile(output_zip, 'w') as zipf:
        for file_name in file_list:
            # The extension is fixed to .txt
            file_name_with_ext = f"{file_name}.txt"

            file_path = os.path.join(folder_path, file_name_with_ext)
            if os.path.exists(file_path):
                print(f"Adding to archive: {file_name_with_ext}")
                zipf.write(file_path, arcname=file_name_with_ext)
            else:
                print(f"File not found or unsupported file type: {file_name_with_ext}")
    print(f"Done: '{output_zip}' has been created.")


# List the folders under the backup directory and let the user pick one
def select_folder():
    base_path = "/data/app/idrac_info/data/backup/"
    if not os.path.isdir(base_path):
        raise ValueError(f"Base path '{base_path}' does not exist.")

    folders = [f for f in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, f))]
    if not folders:
        raise ValueError(f"No folders found under '{base_path}'.")

    print("Available folders:")
    for idx, folder in enumerate(folders, start=1):
        print(f"{idx}. {folder}")

    choice = int(input("Enter the number of the folder to use: ").strip())
    if choice < 1 or choice > len(folders):
        raise ValueError("Please select a valid number.")

    return os.path.join(base_path, folders[choice - 1])


# Main entry point
if __name__ == "__main__":
    try:
        # Pick a folder under the backup directory
        folder_path = select_folder()

        output_zip_name = input("Enter the zip file name to create (without extension, e.g. output): ").strip()

        # Write the zip file to the current directory
        output_zip = os.path.join(os.getcwd(), f"{output_zip_name}.zip")

        # Load the file name list
        file_list = read_file_list()

        if not file_list:
            print("list.txt contains no file names.")
        else:
            zip_selected_files(folder_path, file_list, output_zip)
    except ValueError as e:
        print(e)
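For scripted use, the zipping step could be called directly instead of going through select_folder() and input(); a minimal sketch with hypothetical folder and list entries:

import os

# Hypothetical values: a dated backup folder and two entries as they appear in list.txt.
folder = "/data/app/idrac_info/data/backup/2024-01-01"
names = ["DKK3674", "GFF3674"]

# With zip_selected_files from the script above in scope:
# zip_selected_files(folder, names, os.path.join(os.getcwd(), "output.zip"))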
2
data/server_list/server_list.txt
Normal file
@@ -0,0 +1,2 @@
4XZCZC4