second commit

This commit is contained in:
Furen Xiao 2025-09-16 13:20:19 +08:00
parent d906b521a2
commit 38f54f252f
737 changed files with 135662 additions and 0 deletions

176
.gitignore vendored Normal file
View file

@ -0,0 +1,176 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
components
.vscode/
whoosh_index/
*.pdf
*.seg
*.sql
*.xls
*.xlsx
sitestatic/

17
.project Normal file
View file

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>adm-ntuh-net</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

5
.pydevproject Normal file
View file

@ -0,0 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
</pydev_project>

View file

@ -0,0 +1,14 @@
eclipse.preferences.version=1
encoding//ntuh/assistant.py=utf-8
encoding//ntuh/get_inpatient.py=utf-8
encoding//ntuh/getop.py=utf-8
encoding//ntuh/ntuh/settings.py=utf-8
encoding//ntuh/ntuhgov/portal_ghost.py=utf-8
encoding//ntuh/ntuhgov/portal_spynner.py=utf-8
encoding//ntuh/registry/models.py=utf-8
encoding//ntuh/registry/tests.py=utf-8
encoding//ntuh/registry/utils.py=utf-8
encoding//ntuh/registry/views.py=utf-8
encoding//ntuh/submodule/ntuhgov/portal_ghost.py=utf-8
encoding//ntuh/submodule/ntuhgov/portal_spynner.py=utf-8
encoding//ntuh/unf.py=utf-8

View file

@ -0,0 +1,3 @@
eclipse.preferences.version=1
project.repository.kind=gitlab
project.repository.url=http\://gitlab.ntuh.net/admin/adm-ntuh-net

191
IMPAX/Untitled.ipynb Normal file

File diff suppressed because one or more lines are too long

154
IMPAX/analyze_study.ipynb Normal file
View file

@ -0,0 +1,154 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from PIL import Image, ImageFilter, ImageMath\n",
"\n",
"import numpy as np\n",
"import skimage\n",
"from sklearn.feature_extraction import image\n",
"\n",
"STUDY_PATH = \"/media/cifs/shares/SRS/storage/tmp/MRI With_Without Contrast--Brain_53820330\"\n",
"\n",
"MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/media/cifs/shares/SRS/storage/tmp/MRI With_Without Contrast--Brain_53820330/export--873570386.jpg\n",
"<PIL.Image.Image image mode=L size=512x512 at 0x7FE087FC8350>\n",
"[[34 34 32 ... 35 34 33]\n",
" [34 43 40 ... 45 40 42]\n",
" [29 49 47 ... 42 39 47]\n",
" ...\n",
" [31 48 49 ... 45 42 41]\n",
" [33 51 49 ... 43 48 54]\n",
" [36 56 52 ... 61 58 60]]\n"
]
}
],
"source": [
"for jpg_file in sorted(os.listdir(STUDY_PATH)):\n",
" jpg_path = os.path.join(STUDY_PATH, jpg_file)\n",
" print(jpg_path)\n",
" img = Image.open(jpg_path).convert('L')\n",
" print(img)\n",
" data = np.array(img)\n",
" print(data)\n",
" \n",
"# blocks = skimage.util.shape.view_as_blocks(data, (300, 300))\n",
"# print(blocks.shape)\n",
" \n",
" break"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['BaseEstimator', 'PatchExtractor', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', '_compute_gradient_3d', '_compute_n_patches', '_make_edges_3d', '_mask_edges_weights', '_to_graph', 'as_strided', 'check_array', 'check_random_state', 'extract_patches', 'extract_patches_2d', 'grid_to_graph', 'img_to_graph', 'np', 'numbers', 'product', 'reconstruct_from_patches_2d', 'sparse']\n",
"(2, 2, 250, 250)\n"
]
}
],
"source": [
"print(dir(image))\n",
"# extract_patches\n",
"\n",
"patches = image.extract_patches(data, patch_shape=250, extraction_step=250)\n",
"print(patches.shape)"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 512)\n"
]
}
],
"source": [
"print(data.shape)\n",
"\n",
"\n",
"# # chop it up\n",
"# I, J = map(np.arange, (200, 200), data.shape[:2], (200, 200))\n",
"# chops = [np.split(row, J, axis=1) for row in np.split(data, I, axis=0)]\n",
"\n",
"# print(I,J)\n",
"# print(len(chops))\n",
"# print(chops[0])\n",
"# # do something with the bits\n",
"\n",
"# predictions = [chop-(i+j)*(chop>>3) for j, row in enumerate(chops) for i, chop in enumerate(row)]\n",
"\n",
"# # unflatten predictions\n",
"# def nest(data, template):\n",
"# data = iter(data)\n",
"# return [[next(data) for _ in row] for row in template]\n",
"\n",
"# pred_lol = nest(predictions, chops)\n",
"\n",
"\n",
"# # almost builtin reconstruction\n",
"# def np_block_2D(chops):\n",
"# return np.block([[[x] for x in row] for row in chops])\n",
"\n",
"# recon = np_block_2D(pred_lol)\n",
"# Image.fromarray(recon).save('demo.png')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

147
IMPAX/auto.py Normal file
View file

@ -0,0 +1,147 @@
import time
from pywinauto.application import Application
# app = Application(backend="uia").start("notepad.exe")
# app['未命名 - 記事本'].type_keys("%FX")
#from pywinauto.timings import Timings
#Timings.window_find_timeout = 50.0
def legacy_properties(ctrl):
    """Collect the legacy Name -> Value properties of a control's children.

    Each child of `ctrl` exposes a legacy_properties() dict; the returned
    mapping keys each child's 'Name' entry to its 'Value' entry.
    """
    child_props = (child.legacy_properties() for child in ctrl.children())
    return {props['Name']: props['Value'] for props in child_props}
# app = Application().start(r"C:\Program Files (x86)\AGFA\IMPAX Client\AppStart.exe")
# Attach to a running IMPAX client; if none is running, launch it via
# AppStart.exe and then attach to the main executable it spawns.
try:
    app = Application().connect(path=r"C:\Program Files (x86)\AGFA\IMPAX Client\6.5.2.114\impax-client-main.exe")
except:
    app = Application().start(r"C:\Program Files (x86)\AGFA\IMPAX Client\AppStart.exe")
    app = Application().connect(path=r"C:\Program Files (x86)\AGFA\IMPAX Client\6.5.2.114\impax-client-main.exe")
# Second connection via the UIA backend (richer accessibility tree) to the
# same process; magic lookup disabled for speed/predictability.
app2 = Application('uia', allow_magic_lookup=False).connect(path=r"C:\Program Files (x86)\AGFA\IMPAX Client\6.5.2.114\impax-client-main.exe")
# print( app.exists(['IMPAX 6.5.2.114 Enterprise Unlimited']))
# exit()
# if not app['IMPAX 6.5.2.114 Enterprise Unlimited'].exists('重新設定'):
def login():
    # Fill in and submit the IMPAX login dialog.
    # NOTE(review): user id and password are hard-coded below — move them to
    # a config file or environment variables.
    time.sleep(1)
    # app['IMPAX 6.5.2.114'].print_ctrl_ids(filename='ids_login.txt')
    app['IMPAX 6.5.2.114']['使用者識別碼:Edit'].type_keys('004552')
    time.sleep(1)
    app['IMPAX 6.5.2.114']['密碼:Edit'].type_keys('n122119493')
    app['IMPAX 6.5.2.114']['登入'].click()
    # Give the main application window time to appear after login.
    time.sleep(20)
def fetch(patient_id, modality='MR'):
    # Search IMPAX for a patient's studies and collect those with more than
    # 10 images.  `modality` is currently unused because the combo-box
    # selection below is commented out.
    # NOTE(review): indentation was reconstructed from a flattened source;
    # verify the nesting against the original file.
    try:
        # Probing for the logout button doubles as a "logged in?" check;
        # any lookup failure falls back to logging in.
        if app['IMPAX 6.5.2.114 Enterprise Unlimited'].exists('登出'):
            pass
    except:
        login()
    #print(dir(app['IMPAX 6.5.2.114 Enterprise Unlimited']['登出'].parent()))
    #app['IMPAX 6.5.2.114 Enterprise Unlimited'].wait('visible')
    # If the search pane's reset button is not visible, click the logout
    # button's parent to switch back to the search view.
    if not app['IMPAX 6.5.2.114 Enterprise Unlimited'].exists('重新設定'):
        app['IMPAX 6.5.2.114 Enterprise Unlimited']['登出'].parent().click_input()
    #app['IMPAX 6.5.2.114 Enterprise Unlimited']['重新設定'].wait('enabled')
    #app['IMPAX 6.5.2.114 Enterprise Unlimited'].window(auto_id="resetButton").click()
    # Dump control ids for debugging, then run a fresh patient-id search.
    app['IMPAX 6.5.2.114 Enterprise Unlimited'].print_ctrl_ids(filename='ids_uia.txt')
    app['IMPAX 6.5.2.114 Enterprise Unlimited']['重新設定'].click()
    app['IMPAX 6.5.2.114 Enterprise Unlimited']['病歷號碼Edit'].type_keys(patient_id)
    #app['IMPAX 6.5.2.114 Enterprise Unlimited']['影像檢查設備ComboBox'].click_input()
    #app['IMPAX 6.5.2.114 Enterprise Unlimited']['影像檢查設備ComboBox'].select(modality)
    app['IMPAX 6.5.2.114 Enterprise Unlimited']['搜尋Button'].click()
    study_list = []
    app2['IMPAX 6.5.2.114 Enterprise Unlimited'].print_ctrl_ids(filename='ids_uia.txt')
    # Walk the result-grid rows (via the UIA connection); keep studies that
    # belong to this patient and contain more than 10 images.
    for c in app2['IMPAX 6.5.2.114 Enterprise Unlimited'].child_window(title="描述內容", control_type="Pane").children():
        # print(c)
        p = legacy_properties(c)
        if 'patient.patient_id' in p and p['patient.patient_id'] == patient_id:
            print(c.texts()[0], p['View'], p['study.accession_number'], p['study.modality'], p['study.study_date'], p['study.num_images'], p['study.study_description'])
            # exit()
            if int(p['study.num_images'])>10:
                study_list.append((c.texts()[0], p))
    # NOTE(review): debugging stop — everything below never runs; presumably
    # the study-opening loop was still under development.  Confirm.
    exit()
    # Archive states observed in the 'View' column:
    # Available / VIEWABLE / VIEWING / nearline
    for c, p in study_list:
        print(c)
        ctrl = app['IMPAX 6.5.2.114 Enterprise Unlimited'][c].children()[0]
        # print(ctrl.texts())
        # Double-click the study row to open it in the viewer.
        app['IMPAX 6.5.2.114 Enterprise Unlimited'][c].children()[0].double_click_input()
        # time.sleep(10)
        # app['IMPAX 6.5.2.114 Enterprise Unlimited'].print_ctrl_ids(filename='ids_view.txt')
    exit()
fetch('3009684')
#fetch('2228813')
#fetch('2541371', 'CT')

44
IMPAX/ck-download.py Normal file
View file

@ -0,0 +1,44 @@
import datetime
import time
from sqlalchemy.ext.automap import automap_base
# from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from download import getPatient
from models import *
MAX_EXE_TIME = 3600  # 1 hour — stop iterating once we have run this long
rescan = False
session = Session()
Base = automap_base()
# NOTE(review): database credentials are hard-coded in the URL — move them to
# configuration or environment variables.
engine = create_engine('mysql+pymysql://ntuh:n122119493@virtualmin.ntuh.net/ck', pool_recycle=3600)
Base.prepare(engine, reflect=True)
SessionCK = sessionmaker(bind=engine)
session_ck = SessionCK()
start_time = time.time()
CKPatient = Base.classes.ck_patient
# Walk every CK patient and download their studies via getPatient().
# NOTE(review): indentation reconstructed from a flattened source — the
# rescan branching below (fetch only unseen patients when rescan is False,
# only known patients when True) should be confirmed against the original.
for p in session_ck.query(CKPatient):
    print(p.id, p.medical_records)
    p2 = session.query(Patient).get(p.medical_records)
    if p2 is None:
        # print(p.id, p.medical_records)
        if rescan == True:
            getPatient(p.medical_records, rescanStudy = rescan)
        # exit()
    else:
        if rescan == False:
            getPatient(p.medical_records, rescanStudy = rescan)
        # print('skipped', p2.patient_id, p2.counter, p2.saved)
    # Stop after MAX_EXE_TIME so the job can be re-run periodically.
    diff_time = time.time() - start_time
    str_time = str(datetime.timedelta(seconds=diff_time))
    print(f'Exec time {str_time}')
    if diff_time > MAX_EXE_TIME:
        break

199
IMPAX/dataset.py Normal file
View file

@ -0,0 +1,199 @@
import os
import random
from PIL import Image, ImageFilter, ImageMath
from scipy import ndimage
import numpy as np
import torch
PATCH_SIZE = 256


def img_frombytes(data):
    """Pack a 2-D binary ndarray into a 1-bit ('1' mode) PIL image."""
    packed = np.packbits(data, axis=1)
    return Image.frombytes(mode='1', size=tuple(reversed(data.shape)), data=packed)


def getpatch(width, height):
    """Pick a random top-left corner for a PATCH_SIZE crop of a width x height image.

    The corner snaps down to a PATCH_SIZE grid, then is clamped so the
    crop lies fully inside the image.
    """
    left = random.randint(0, width - 1) // PATCH_SIZE * PATCH_SIZE
    left = min(left, width - PATCH_SIZE)
    top = random.randint(0, height - 1) // PATCH_SIZE * PATCH_SIZE
    top = min(top, height - PATCH_SIZE)
    return left, top
class IMPAXDataset(object):
    """Torch-style dataset over JPEG studies exported from IMPAX.

    Expected layout under JPGDIR (one directory per patient id):
        <study>_100/  full-quality JPEGs (scanned as the index)
        <study>_90/   quality-90 JPEGs (network input)
        <study>_AN/   annotated JPEGs
        <study>_TXT/  1-bit text/overlay masks, generated here on first scan

    Fixes vs. the original: removed ~45 lines of unreachable torchvision
    tutorial code after the return in __getitem__ (it referenced undefined
    attributes self.root / self.imgs / self.masks), and a redundant second
    os.path.isfile(txt_path) check in __init__.
    """

    def __init__(self, JPGDIR):
        """Scan JPGDIR, build parallel path lists and generate missing masks."""
        # Parallel lists: index i refers to the same image in each list.
        self.ST_90 = []
        self.ST_100 = []
        self.ST_AN = []
        self.ST_TXT = []
        # Extreme image sizes seen during the scan (diagnostics only).
        self.MAXSHAPE = None
        self.MAXSIZE = 0
        self.MINSHAPE = None
        self.MINSIZE = 9999 * 9999
        self.gets = 0
        for pid in sorted(os.listdir(JPGDIR)):
            PATDIR = os.path.join(JPGDIR, pid)
            for study in sorted(os.listdir(PATDIR)):
                if study.endswith('_100'):
                    ST100_DIR = os.path.join(PATDIR, study)
                    TXT_DIR = ST100_DIR.replace('_100', '_TXT')
                    os.makedirs(TXT_DIR, exist_ok=True)
                    for jpg in sorted(os.listdir(ST100_DIR)):
                        jpg_path = os.path.join(ST100_DIR, jpg)
                        txt_path = jpg_path.replace('_100', '_TXT').replace('.jpg', '.png')
                        self.ST_100.append(jpg_path)
                        self.ST_90.append(jpg_path.replace('_100', '_90'))
                        self.ST_AN.append(jpg_path.replace('_100', '_AN'))
                        self.ST_TXT.append(txt_path)
                        if os.path.isfile(txt_path):
                            continue  # mask already generated on a prior run
                        img = Image.open(jpg_path).convert('L')
                        width, height = img.size
                        size = width * height
                        if self.MAXSIZE < size:
                            self.MAXSIZE = size
                            self.MAXSHAPE = width, height
                        if self.MINSIZE > size:
                            self.MINSIZE = size
                            self.MINSHAPE = width, height
                        jpg_ndarray = np.array(img)
                        # Overlay-text pixels are either near grey 0xCC or
                        # (nearly) pure black.
                        CC = np.logical_and(jpg_ndarray >= 0xCB, jpg_ndarray <= 0xCD)
                        C0 = (jpg_ndarray <= 0x01)
                        MASK = np.logical_or(CC, C0)
                        # Shift the mask up-left by one pixel and require the
                        # 0xCC pixel to have a masked neighbour — drops
                        # isolated grey pixels that are not text.
                        MASK = np.roll(MASK, -1, 0)
                        MASK = np.roll(MASK, -1, 1)
                        MASKED = np.logical_and(CC, MASK).astype('uint8')
                        # Keep a pixel only if at least two of its 3x3
                        # neighbours are set (rank=-2 = second largest).
                        FILTERD = ndimage.rank_filter(MASKED, rank=-2, size=3)
                        FILTERD = np.minimum(MASKED, FILTERD)
                        im = img_frombytes(FILTERD)
                        im.save(txt_path)
        if self.MINSHAPE:
            print(self.MINSHAPE)
        if self.MAXSHAPE:
            print(self.MAXSHAPE)

    def __getitem__(self, idx):
        """Return one random PATCH_SIZE crop.

        Returns (input, target) float tensors: input is the quality-90
        image with a leading channel axis (1, P, P); target stacks the
        annotated image and the text mask into shape (2, P, P).
        """
        st_90 = Image.open(self.ST_90[idx]).convert('L')
        st_AN = Image.open(self.ST_AN[idx]).convert('L')
        st_TX = Image.open(self.ST_TXT[idx]).convert('L')
        width, height = st_90.size
        w, h = getpatch(width, height)
        s2_90 = np.array(st_90)[np.newaxis, h:h+PATCH_SIZE, w:w+PATCH_SIZE]
        s2_AN = np.array(st_AN)[h:h+PATCH_SIZE, w:w+PATCH_SIZE]
        s2_TX = np.array(st_TX)[h:h+PATCH_SIZE, w:w+PATCH_SIZE]
        s2_AN_TX = np.stack((s2_AN, s2_TX))
        return torch.from_numpy(s2_90).float(), torch.from_numpy(s2_AN_TX).float()

    def __len__(self):
        """Number of indexed full-quality images."""
        return len(self.ST_100)

665
IMPAX/download.py Normal file
View file

@ -0,0 +1,665 @@
from pathlib import PureWindowsPath
import base64
import datetime
import re
import time
from appium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from models import *
MIN_NUM_IMAGES = 9  # threshold referenced by the (commented) row filtering
RootPath = PureWindowsPath(r'S:\storage\0')
# Per-run folder name: current unix time packed into 4 big-endian bytes,
# urlsafe-base64 encoded with the '=' padding stripped.
RunStamp = base64.urlsafe_b64encode(int(time.time()).to_bytes(4,'big')).decode().replace('=','')
RunPath = RootPath/RunStamp
print(RunStamp, RunPath)
session = Session()
def launchApp(desired_caps):
    """Open a WinAppDriver (Appium) session with the given capabilities.

    The driver endpoint is the WinAppDriver instance on the capture PC
    (previously addressed as http://win10z:4723).
    """
    dut_url = "http://192.168.11.101:4723"
    session = webdriver.Remote(
        command_executor=dut_url,
        desired_capabilities=desired_caps,
    )
    return session
def getDriverFromWin(win):
    """Attach a new WinAppDriver session to an existing top-level window.

    `win` must expose a NativeWindowHandle attribute; the handle is
    converted to a hex string and passed as the appTopLevelWindow
    capability, then the session switches to that window.
    """
    handle_hex = format(int(win.get_attribute("NativeWindowHandle")), 'x')
    driver = launchApp({"appTopLevelWindow": handle_hex})
    driver.switch_to.window(handle_hex)
    return driver
def getDict(row):
    """Map each DataItem under `row` to {Name: Value}.

    Names are always stripped; values are stripped only when truthy
    (None / empty values pass through unchanged).
    """
    result = {}
    for item in row.find_elements_by_tag_name('DataItem'):
        key = item.get_attribute('Name').strip()
        raw = item.get_attribute('Value.Value')
        result[key] = raw.strip() if raw else raw
    return result
# Launch a driver for the Windows Desktop (Root)
desired_caps = {}
desired_caps["app"] = "Root"
desktop = launchApp(desired_caps)
# Input and check (Chinese?)
def edit_input(control, text):
    # Type `text` into an Edit control and verify it landed; retries once to
    # work around a Chinese IME transforming the keystrokes.
    control.send_keys(text)
    time.sleep(1)
    if control.text != text: # Chinese input method?
        # Toggle the IME with Shift, erase whatever got typed, and retry.
        control.send_keys(Keys.LEFT_SHIFT)
        control.send_keys(Keys.BACKSPACE * len(text))
        control.send_keys(text)
    if control.text != text:
        # Still wrong after the retry — give up hard.
        print('Error in edit_input',control,text)
        exit()
        # NOTE(review): unreachable after exit(); indentation reconstructed —
        # presumably leftover cleanup code. Confirm against the original.
        control.send_keys(Keys.ALT, Keys.F4)
def CloseItemPanel():
    # Close any open viewer item panels: hover the first panel to reveal its
    # close button and click it; give up after 9 attempts.
    for i in range(9):
        OpenItemPanel = desktop.find_elements_by_accessibility_id("OpenItemPanel")
        if not OpenItemPanel:
            break  # nothing left to close
        print('CloseItemPanel', i)
        ActionChains(desktop).move_to_element(OpenItemPanel[0]).perform()
        button = OpenItemPanel[0].find_element_by_xpath('//Button')
        button.click()
        # exit()
def check_error():
    # Dismiss modal error dialogs ("錯誤" / fatal "發生錯誤") if present,
    # then close any viewer panels left open from a previous run.
    button = desktop.find_elements_by_xpath('*/Window[@Name="錯誤"]/Button[@Name="確定"]')
    if button:
        print (button[0].text)
        button[0].send_keys(Keys.ENTER)
        # view = WebDriverWait(desktop, 20).until(
        #     EC.presence_of_element_located((By.ACCESSIBILITY_ID, "LoginView"))
        # )
    button = desktop.find_elements_by_xpath('*/Window[@Name="發生錯誤"]/Button[@Name="確定"]')
    if button:
        print (button[0].text)
        button[0].send_keys(Keys.ENTER)
        # A fatal error kicks the client back to the login screen; wait for it.
        # NOTE(review): indentation reconstructed — this wait is assumed to
        # belong to the fatal-error branch; confirm against the original.
        view = WebDriverWait(desktop, 20).until(
            EC.presence_of_element_located((By.ACCESSIBILITY_ID, "LoginView"))
        )
    view = desktop.find_elements_by_accessibility_id('ApplicationView')
    # Close Viewer panels.
    CloseItemPanel()
    # if view:
    #     view[0].send_keys(Keys.ALT, Keys.F4)
# exit()
# Login if necessary, return ApplicationView
def login():
    # Log into IMPAX: close any open ApplicationView, start the client if its
    # LoginView is not present, submit the credentials, and return the new
    # ApplicationView element.
    # NOTE(review): credentials are hard-coded — move to config/env.
    ID = '004552'
    password = 'n122119493'
    win = desktop.find_elements_by_accessibility_id('ApplicationView')
    if win:
        # An application window is already open; close it and start fresh.
        win[0].send_keys(Keys.ALT, Keys.F4)
        time.sleep(5)
    # Start the client (must run as Administrator) when no login view exists.
    try:
        view = desktop.find_element_by_accessibility_id('LoginView')
    except:
        desired_caps = {}
        desired_caps["app"] = r"C:\Program Files (x86)\AGFA\IMPAX Client\6.5.2.114\impax-client-main.exe"
        view = launchApp(desired_caps).find_element_by_accessibility_id('LoginView')
    # edit_input verifies the text landed (guards against the Chinese IME).
    userid = view.find_element_by_accessibility_id('userIDText')
    edit_input(userid, ID)
    view.find_element_by_accessibility_id('passwordText').send_keys(password)
    view.find_element_by_accessibility_id('loginButton').click()
    win = WebDriverWait(desktop, 20).until(
        EC.presence_of_element_located((By.ACCESSIBILITY_ID, "ApplicationView"))
    )
    return win
# win = desktop.find_element_by_name('IMPAX 6.5.2.114 Enterprise Unlimited')
# print(win)
# driver = getDriverFromWin(win)
# print(driver)
check_error() # also kill ApplicationView
win = login()
# try:
# win = desktop.find_element_by_accessibility_id('ApplicationView')
# except:
# login()
# win = WebDriverWait(desktop, 20).until(
# EC.presence_of_element_located((By.ACCESSIBILITY_ID, "ApplicationView"))
# )
# print(win)
driver = getDriverFromWin(win)
# driver = win #AttributeError: 'WebElement' object has no attribute 'w3c' (ActionChains)
print(driver)
def saveSeries(quality, exportPath):
    # Export the series shown in viewer position 0,0 as JPEGs: right-click
    # the image, pick 輸出 (Export) -> 影像序列 (image series), dial in the
    # JPEG quality, type the destination folder, and confirm the dialog.
    position = driver.find_element_by_accessibility_id('StudyImageViewer').find_element_by_name("Screen_0_format_5_Position_0_0")
    ActionChains(driver).context_click(position).perform()
    # The context menu is a separate top-level window, so it is located via
    # the desktop session rather than the app session.
    DisplayMenuForm = WebDriverWait(desktop, 20).until(
        EC.presence_of_element_located((By.ACCESSIBILITY_ID, "DisplayMenuForm0"))
    )
    ActionChains(desktop).move_to_element(DisplayMenuForm.find_element_by_name('輸出')).perform()
    ActionChains(desktop).click(DisplayMenuForm.find_element_by_name('影像序列')).perform()
    view = WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.ACCESSIBILITY_ID, "JPEGDlgView"))
    )
    # The second combo box on the export dialog is the quality spinner.
    cb = view.find_element_by_xpath('//ComboBox[2]')
    edit = cb.find_element_by_xpath('//Edit')
    up = cb.find_element_by_name('Up')
    down = cb.find_element_by_name('Down')
    # Step the spinner until it reads the requested quality.
    while int(edit.get_attribute('Value.Value')) < quality:
        up.click()
    while int(edit.get_attribute('Value.Value')) > quality:
        down.click()
    # Clear the export-path field one character at a time, then type ours
    # (trailing backslash so IMPAX treats it as a directory).
    export = view.find_element_by_xpath(r'//Edit[@Name="匯出位置:"]')
    while len(export.text):
        export.send_keys(Keys.BACKSPACE)
    export.send_keys(str(exportPath)+'\\')
    driver.find_element_by_accessibility_id('btnOK').click()
    driver.find_element_by_accessibility_id('btnCancel').click()
def screen_1x1():
    # Switch the viewer to a single-image (1x1) screen layout.
    # First dismiss a generic dialog box if one is blocking the UI.
    box = desktop.find_elements_by_accessibility_id('GenericDialogBox')
    if box:
        box[0].find_element_by_name("確定").click()
    # Button 5 on the top toolbar opens the layout picker; the first
    # AfxWnd90u pane is the 1x1 layout choice.
    top = driver.find_element_by_accessibility_id("TOP_TOOLBAR0")
    button = top.find_element_by_xpath('//Button[5]')
    ActionChains(driver).click(button).perform()
    AfxWnd90u = driver.find_element_by_xpath(r'//Pane[@ClassName="AfxWnd90u"][1]')
    ActionChains(driver).click(AfxWnd90u).perform()
    return
pattern_series_uid = r'series_uid ] = "([.0-9T]+)"'
prog_series_uid = re.compile(pattern_series_uid)
def getSeriesDocument():
    # Open the "Display Service Tool" for the series shown in the viewer,
    # scrape its Document panes, and return (series_uid, [document texts]).
    screen_1x1()
    position = driver.find_element_by_accessibility_id('StudyImageViewer').find_element_by_name("Screen_0_format_5_Position_0_0")
    # Ctrl+right-click opens the extended context menu.
    ActionChains(driver).key_down(Keys.LEFT_CONTROL).context_click(position).key_down(Keys.LEFT_CONTROL).perform()
    ActionChains(desktop).send_keys(Keys.ARROW_DOWN).perform()
    ActionChains(desktop).send_keys(Keys.ARROW_DOWN).perform()
    DisplayMenuForm = desktop.find_element_by_accessibility_id('DisplayMenuForm0')
    # 資訊 (Information) -> 服務視窗 (service window).
    ActionChains(desktop).move_to_element(DisplayMenuForm.find_element_by_name('資訊')).perform()
    DisplayMenuForm.find_element_by_name('服務視窗').click()
    position.click()
    tool = driver.find_element_by_name("Display Service Tool")
    doc = []
    for e in tool.find_elements_by_xpath("//Document"):
        doc.append(e.text)
    # doc[1] holds the series attributes; prog_series_uid pulls the UID, e.g.:
    # [ series, series_uid ] = "1.2.840.113619.2.244....969" ( 54, 55 )
    # [ series, modality ] = "MR" ( 2, 3 )
    # [ series, series_description ] = "Ax T2 FLAIR" ( 11, 12 )
    result = prog_series_uid.search(doc[1])
    print(result)
    # Close the tool and re-open/close the context menu to restore focus.
    tool.find_element_by_accessibility_id('Close').click()
    ActionChains(driver).context_click(position).perform()
    return result[1], doc
def getStudyByViewer(accessionNumber, studyPath):
    """Walk every series of the study currently open in the IMPAX image
    viewer, persist series metadata to the local DB, export screenshots,
    then fetch the study report and mark the Study row done.

    accessionNumber -- study accession number (Study primary key).
    studyPath       -- base directory; images are saved below
                       <studyPath>/shown/... and <studyPath>/hidden/...

    Relies on module globals: driver, session, screen_1x1, saveSeries,
    getSeriesDocument, Series, Study, RunStamp.
    """
    # ### Get study report last
    # driver.find_element_by_accessibility_id('ShowTextArea').click()
    # # driver.find_element_by_accessibility_id('reportTab').click()
    # report = driver.find_element_by_accessibility_id('reportControl').find_element_by_xpath('//Document').text
    # driver.find_element_by_accessibility_id('_textAreaHideButton').click()
    # print(report)
    # exit()
    # time.sleep(1)
    # Make it 1x1 view, Try to moved to saveSeries and getSeriesDocument?
    # driver.find_element_by_accessibility_id("TOP_TOOLBAR0").find_element_by_xpath('//Button[5]').click()
    # driver.find_element_by_xpath(r'//Pane[@ClassName="AfxWnd90u"][1]').click()
    # top = WebDriverWait(driver, 20).until(
    #     EC.presence_of_element_located((By.ACCESSIBILITY_ID, "TOP_TOOLBAR0"))
    # )
    screen_1x1()
    # top = win.find_element_by_accessibility_id("TOP_TOOLBAR0")
    # button = top.find_element_by_xpath('//Button[5]')
    # button.click()
    # AfxWnd90u = win.find_element_by_xpath(r'//Pane[@ClassName="AfxWnd90u"][1]')
    # AfxWnd90u.click()
    # driver = getDriverFromWin(win)
    # Series thumbnails in the study navigator strip.
    items = driver.find_elements_by_xpath(r'//ListItem[@AutomationId="ListViewItem-0"]/../ListItem')
    if len(items) == 0:
        # Navigator not visible yet -- presumably toolbar Button[1] toggles it
        # (TODO confirm), then re-query the list.
        toolbar = driver.find_element_by_accessibility_id("STANDARD_TOOLBAR0")
        toolbar.find_element_by_xpath('//Button[1]').click()
        items = driver.find_elements_by_xpath(r'//ListItem[@AutomationId="ListViewItem-0"]/../ListItem')
    num_images = 0
    for c in driver.find_elements_by_xpath(r'//ListItem[@AutomationId="ListViewItem-0"]/../ListItem'):
        # AutomationId =
        print(c.get_attribute('AutomationId'), c.tag_name, c.text)
        # Double-click opens this series in the 1x1 viewport.
        ActionChains(driver).double_click(c).perform()
        # List-item text looks like "1 : 24 : Ax T2 FLAIR"; field 2 is the
        # image count (see the ids_view.txt dump).
        name = c.text
        num_images += int(name.split(' : ')[1])
        uid, doc = getSeriesDocument()
        # print(doc)
        # print(uid)
        # Upsert the Series row keyed by its DICOM series UID.
        s = session.query(Series).get(uid)
        if s is None:
            s = Series(series_uid = uid)
            session.add(s)
        s.name = name
        s.document0 = doc[0]
        s.document1 = doc[1]
        s.accession_number = accessionNumber
        # p.saved = datetime.datetime.now()
        session.commit()
        # Save Demographics shown
        saveSeries(100, studyPath / 'shown' / s.slugify())
        # TOP_TOOLBAR Button[1] presumably toggles the demographics overlay
        # -- TODO confirm; saved once with and once without it.
        driver.find_element_by_accessibility_id("TOP_TOOLBAR0").find_element_by_xpath('//Button[1]').click()
        # Save Demographics hidden
        saveSeries(100, studyPath / 'hidden' / s.slugify())
        driver.find_element_by_accessibility_id("TOP_TOOLBAR0").find_element_by_xpath('//Button[1]').click()
    ### Get study report last
    driver.find_element_by_accessibility_id('ShowTextArea').click()
    try:
        report = driver.find_element_by_accessibility_id('reportControl').find_element_by_xpath('//Document').text
    except:  # NOTE(review): bare except -- report pane may simply be absent; consider narrowing
        report = None
    driver.find_element_by_accessibility_id('_textAreaHideButton').click()
    # Upsert the Study row and record success only when every image was
    # walked AND either a report exists or the study is >99 days old
    # (a report is presumably never coming for such old studies).
    s = session.query(Study).get(accessionNumber)
    if s is None:
        s = Study(accession_number = accessionNumber)
        session.add(s)
    s.report = report
    if s.num_images == num_images and (report or (datetime.date.today()-s.study_date).days>99):
        s.success = True
        s.run = RunStamp
    session.commit()
def getStudyByGrid(study, patientPath):
    """Look *study* up in the IMPAX worklist grid and open it.

    Nearline studies are only asked to be retrieved (no waiting) and the
    function returns; otherwise the study is double-clicked open and
    handed to getStudyByViewer(), then the viewer panel is closed.

    study       -- Study ORM object (uses .accession_number and .naming()).
    patientPath -- per-patient output directory for the exported images.
    """
    # print(patientPath)
    # exit()
    # print(driver.find_element_by_accessibility_id("TOP_TOOLBAR0"))
    driver.find_element_by_accessibility_id("resetButton").click()
    # driver.find_element_by_accessibility_id("accessionNumberText").find_element_by_xpath(r'//Edit[@Name="檢查流水號"]').send_keys(accessionNumber)
    accession = win.find_element_by_accessibility_id('accessionNumberText').find_element_by_tag_name('Edit')
    # '@' is stripped before typing -- presumably it breaks the search field;
    # TODO confirm against the PACS search syntax.
    edit_input(accession, study.accession_number.replace('@', ''))
    # accession.send_keys(study.accession_number)
    # time.sleep(1)
    # if accession.text != study.accession_number: # Chinese input method?
    #     accession.send_keys(Keys.LEFT_SHIFT)
    #     accession.send_keys(Keys.BACKSPACE * len(study.accession_number))
    #     accession.send_keys(study.accession_number)
    driver.find_element_by_accessibility_id("searchButton").click()
    grid = driver.find_element_by_accessibility_id("grid")
    # print(1)
    # for i, e in enumerate(grid.find_elements_by_xpath('*')):
    #     print(e, e.tag_name, e.text)
    # exit()
    # print(2)
    # One row per study: the parent of each "View" cell.
    for i, e in enumerate(grid.find_elements_by_xpath('//DataItem[@Name="View"]/..')):
        print(i, e.tag_name)
        d = getDict(e)
        print(d)
        if 'study.num_images' not in d or int(d['study.num_images']) < MIN_NUM_IMAGES:
            continue
        # Request retrieval of nearline studies, but don't wait for them.
        if d['View'] == 'nearline':
            ActionChains(driver).context_click(e.find_element_by_name('View')).perform()
            # desktop.find_element_by_xpath(r'Pane/Menu[@Name="路徑位置"]/MenuItem[@Name="取回"]').click()
            desktop.find_element_by_name('路徑位置').find_element_by_name("取回").click() # faster
            print('skip nearline',study.accession_number)
            return
        ActionChains(driver).double_click(e.find_element_by_name('View')).perform()
        # print(study.naming())
        # exit()
        # getStudyByViewer(study.accession_number, patientPath/(datetime.datetime.strptime(d['study.study_date'], '%Y/%m/%d').strftime('%Y-%m-%d-')+accessionNumber))
        getStudyByViewer(study.accession_number, patientPath/study.naming())
        # close image viewer
        CloseItemPanel()
    # exit()
    # Recorded full XPath of the overlay-toggle button, kept for reference:
    # "/Pane[@ClassName=\"#32769\"][@Name=\"桌面 1\"]/Window[@Name=\"IMPAX 6.5.2.114 Enterprise Unlimited\"][@AutomationId=\"ApplicationView\"]/Pane[@AutomationId=\"displayPanel\"]/Pane[@AutomationId=\"DisplayView\"]/Pane[@Name=\"TOP_TOOLBAR0\"][starts-with(@AutomationId,\"TOP_TOOLBAR\")]/Button[starts-with(@ClassName,\"WindowsForms10\")]";
def getPatient(ID, rescanStudy = True):
    """Crawl one patient: refresh their study list from the worklist grid,
    then download every not-yet-successful study.

    ID          -- patient chart number typed into the patient-ID field.
    rescanStudy -- when True, re-query the grid and upsert Patient/Study
                   rows first; when False, work from the DB only.
    """
    if rescanStudy:
        driver.find_element_by_accessibility_id("resetButton").click()
        # patientId = win.find_element_by_accessibility_id('patientIdText').find_element_by_xpath(r'//Edit[@Name="病歷號碼"]')
        # patientId = win.find_element_by_accessibility_id('patientIdText').find_element_by_tag_name('Edit')
        patientId = driver.find_element_by_accessibility_id('patientIdText').find_element_by_tag_name('Edit')
        edit_input(patientId, ID)
        # patientId.send_keys(ID)
        # time.sleep(1)
        # if patientId.text != ID: # Chinese input method?
        #     patientId.send_keys(Keys.LEFT_SHIFT)
        #     patientId.send_keys(Keys.BACKSPACE * len(ID))
        #     patientId.send_keys(ID)
        # driver.find_element_by_accessibility_id("patientIdText").find_element_by_xpath(r'//Edit[@Name="病歷號碼"]').send_keys(patientId)
        driver.find_element_by_accessibility_id("searchButton").click()
        grid = driver.find_element_by_accessibility_id("grid")
        # The scroll-down button only exists when results don't fit one page.
        try:
            DownButton = grid.find_element_by_accessibility_id("DownButton")
        except:  # NOTE(review): bare except -- narrow to the "element not found" exception
            DownButton = None
        print(1)
        # for i, e in enumerate(grid.find_elements_by_xpath('*/*')):
        # for i, e in enumerate(grid.find_elements_by_xpath('//DataItem[@Name="View"]/..')):
        for i, e in enumerate(grid.find_elements_by_xpath('*/Custom')):
            print(i, e.tag_name, e.text)
            # Non-numeric rows are presumably headers/separators -- skip them.
            if not e.text.isdigit():
                continue
            d = getDict(e)
            print(d)
            if ('study.accession_number' not in d) or not d['study.accession_number']:
                continue
            # Upsert the Patient row for this grid row.
            p = session.query(Patient).get(d['patient.patient_id'])
            if p is None:
                p = Patient(patient_id = d['patient.patient_id'])
                session.add(p)
            p.patient_name = d['patient.patient_name']
            p.patient_name_ph_utf8 = d['patient.patient_name_ph_utf8']
            # p.saved = datetime.datetime.now()
            session.commit()
            # Upsert the Study row keyed by accession number.
            s = session.query(Study).get(d['study.accession_number'])
            if s is None:
                s = Study(accession_number = d['study.accession_number'])
                session.add(s)
            # else:
            #     #skip study already success
            #     if s.success:
            #         continue
            s.date_time_created = d['study.date_time_created']
            s.modality = d['study.modality']
            s.num_images = d['study.num_images']
            s.status = d['study.status']
            s.study_date = datetime.datetime.strptime(d['study.study_date'], '%Y/%m/%d')
            s.study_description = d['study.study_description']
            s.study_time = d['study.study_time']
            s.patient_id = d['patient.patient_id']
            # p.saved = datetime.datetime.now()
            session.commit()
            # if int(d['study.num_images']) < MIN_NUM_IMAGES:
            #     continue
            # if 'View'in d and d['View']==None:
            #     driver.find_element_by_accessibility_id("DownButton").click()
            #     exit()
            #     continue
            # Pre-request nearline studies now so they are staged by the
            # time getStudyByGrid() comes back for them.
            if (not s.success) and s.num_images > MIN_NUM_IMAGES:
                # if d['View'] not in ['Available', 'VIEWABLE', 'VIEWING']:
                if d['View'] == 'nearline':
                    ActionChains(driver).context_click(e.find_element_by_name('View')).perform()
                    # desktop.find_element_by_xpath(r'Pane/Menu[@Name="路徑位置"]/MenuItem[@Name="取回"]').click()
                    desktop.find_element_by_name('路徑位置').find_element_by_name("取回").click() # faster
                    # getDriverFromWin(desktop.find_element_by_name('路徑位置')).find_element_by_name("取回").click()
                    # print(desktop.find_element_by_xpath(r'Pane/Menu[@Name="路徑位置"]/MenuItem[@Name="取回"]'))
                    # exit()
            # Scroll one row so the next Custom row becomes reachable.
            if DownButton:
                DownButton.click()
        # Page back to the top; clicking past the first page raises,
        # which terminates the loop.
        if DownButton:
            UpPageButton = grid.find_element_by_accessibility_id("UpPageButton")
            try:
                while True:
                    UpPageButton.click()
            except:  # NOTE(review): bare except used as loop terminator
                pass
        # NOTE(review): 'p' is unbound (NameError) if the grid had no valid
        # rows -- the patient search is presumably expected to always match.
        if p.counter is None:
            p.counter = 1
        else:
            p.counter += 1
        session.commit()
    # Download every study that is not yet marked successful.
    for study in session.query(Patient).get(ID).studies:
        if study.success or study.num_images < MIN_NUM_IMAGES:
            continue
        getStudyByGrid(study, RunPath/ID)
if __name__ == '__main__':
    # Crawl a single patient; set to False to skip re-querying the worklist.
    RESCAN = True
    getPatient('3009684', rescanStudy=RESCAN)
    # Alternative entry points kept for manual testing:
    # getPatient('2347157', rescanStudy=RESCAN)
    # getStudyByViewer('T0159343504', r'S:\storage\0')
    # getStudyByGrid('T0159343504', r'S:\storage\0')

1296
IMPAX/ids.txt Normal file

File diff suppressed because it is too large Load diff

68
IMPAX/ids_login.txt Normal file
View file

@ -0,0 +1,68 @@
Control Identifiers:
Dialog - 'IMPAX 6.5.2.114' (L662, T244, R1243, B702)
['Dialog', 'IMPAX 6.5.2.114', 'IMPAX 6.5.2.114Dialog']
child_window(title="IMPAX 6.5.2.114", auto_id="LoginView", control_type="Window")
|
| Pane - '' (L987, T270, R1231, B704)
| ['Pane', '使用者識別碼:Pane', 'Pane0', 'Pane1', '使用者識別碼:Pane0', '使用者識別碼:Pane1']
| child_window(auto_id="authenticationPanel", control_type="Pane")
| |
| | Pane - '' (L987, T270, R1231, B676)
| | ['Pane2', '使用者識別碼:Pane2']
| | child_window(auto_id="AuthenticationView", control_type="Pane")
| | |
| | | Static - '' (L987, T477, R1243, B511)
| | | ['Static', 'Static0', 'Static1']
| | | child_window(auto_id="failedLoginLabel", control_type="Text")
| | |
| | | Edit - '' (L987, T275, R1231, B475)
| | | ['Edit', 'Edit0', 'Edit1']
| | | child_window(auto_id="messageTextBox", control_type="Edit")
| | |
| | | Edit - '' (L987, T593, R1171, B617)
| | | ['密碼:Edit', 'Edit2']
| | | child_window(auto_id="passwordText", control_type="Edit")
| | |
| | | Edit - '' (L987, T540, R1171, B564)
| | | ['使用者識別碼:Edit', 'Edit3']
| | | child_window(auto_id="userIDText", control_type="Edit")
| | |
| | | Button - '登入' (L987, T627, R1033, B656)
| | | ['登入', '登入Button', 'Button', 'Button0', 'Button1']
| | | child_window(title="登入", auto_id="loginButton", control_type="Button")
| | |
| | | Button - '選項 >>' (L1043, T627, R1119, B656)
| | | ['選項 >>', '選項 >>Button', 'Button2']
| | | child_window(title="選項 >>", auto_id="optionsButton", control_type="Button")
| | |
| | | Static - '密碼:' (L987, T569, R1028, B591)
| | | ['密碼:', 'Static2', '密碼:Static']
| | | child_window(title="密碼:", auto_id="passwordLabel", control_type="Text")
| | |
| | | Static - '使用者識別碼:' (L987, T516, R1091, B538)
| | | ['Static3', '使用者識別碼:', '使用者識別碼:Static']
| | | child_window(title="使用者識別碼:", auto_id="userIDLabel", control_type="Text")
|
| Pane - '' (L670, T270, R977, B704)
| ['Pane3']
| child_window(auto_id="graphicPanel", control_type="Pane")
| |
| | Pane - '' (L670, T270, R977, B704)
| | ['Pane4']
| | child_window(auto_id="splashPicture", control_type="Pane")
|
| TitleBar - '' (L681, T247, R1240, B270)
| ['TitleBar']
| |
| | Menu - '系統' (L670, T252, R692, B274)
| | ['Menu', '系統Menu', '系統', '系統0', '系統1']
| | child_window(title="系統", auto_id="MenuBar", control_type="MenuBar")
| | |
| | | MenuItem - '系統' (L670, T252, R692, B274)
| | | ['MenuItem', '系統MenuItem', '系統2']
| | | child_window(title="系統", control_type="MenuItem")
| |
| | Button - '關閉' (L1207, T245, R1241, B270)
| | ['關閉Button', '關閉', 'Button3']
| | child_window(title="關閉", control_type="Button")

419
IMPAX/ids_view.txt Normal file
View file

@ -0,0 +1,419 @@
Control Identifiers:
Dialog - 'IMPAX 6.5.2.114 Enterprise Unlimited' (L0, T0, R1920, B954)
['Dialog', 'IMPAX 6.5.2.114 Enterprise UnlimitedDialog', 'IMPAX 6.5.2.114 Enterprise Unlimited', 'Dialog0', 'Dialog1']
child_window(title="IMPAX 6.5.2.114 Enterprise Unlimited", auto_id="ApplicationView", control_type="Window")
|
| Dialog - 'T0159343504' (L8, T149, R1912, B354)
| ['Dialog2', 'T0159343504', 'T0159343504Dialog', 'T01593435040', 'T01593435041']
| child_window(title="T0159343504", control_type="Window")
| |
| | Slider - '1' (L16, T180, R1624, B198)
| | ['2016/1/23 下午 03:00:19Slider', 'Slider']
| | child_window(title="1", auto_id="1076", control_type="Slider")
| | |
| | | Thumb - '位置' (L22, T182, R29, B194)
| | | ['Thumb', '位置Thumb', '位置', 'Thumb0', 'Thumb1', '位置Thumb0', '位置Thumb1', '位置0', '位置1']
| | | child_window(title="位置", control_type="Thumb")
| | |
| | | Button - '向右翻頁' (L29, T186, R1618, B190)
| | | ['向右翻頁', 'Button', '向右翻頁Button', 'Button0', 'Button1']
| | | child_window(title="向右翻頁", control_type="Button")
| |
| | ListBox - '' (L16, T220, R1904, B346)
| | ['ListBox', '2016/1/23 下午 03:00:19ListBox']
| | child_window(auto_id="1073", control_type="List")
| | |
| | | ScrollBar - '垂直' (L1885, T222, R1902, B344)
| | | ['ScrollBar', '垂直ScrollBar', '垂直']
| | | child_window(title="垂直", auto_id="NonClientVerticalScrollBar", control_type="ScrollBar")
| | | |
| | | | Button - '上移一行' (L1885, T222, R1902, B239)
| | | | ['Button2', '上移一行', '上移一行Button']
| | | | child_window(title="上移一行", auto_id="UpButton", control_type="Button")
| | | |
| | | | Thumb - '位置' (L1885, T239, R1902, B319)
| | | | ['Thumb2', '位置Thumb2', '位置2']
| | | | child_window(title="位置", auto_id="ScrollbarThumb", control_type="Thumb")
| | | |
| | | | Button - '向下翻頁' (L1885, T319, R1902, B327)
| | | | ['向下翻頁Button', 'Button3', '向下翻頁']
| | | | child_window(title="向下翻頁", auto_id="DownPageButton", control_type="Button")
| | | |
| | | | Button - '下移一行' (L1885, T327, R1902, B344)
| | | | ['Button4', '下移一行', '下移一行Button']
| | | | child_window(title="下移一行", auto_id="DownButton", control_type="Button")
| | |
| | | ListItem - '1 : 24 : Ax T2 FLAIR' (L27, T222, R111, B322)
| | | ['ListItem', '1 : 24 : Ax T2 FLAIR', '1 : 24 : Ax T2 FLAIRListItem', 'ListItem0', 'ListItem1']
| | | child_window(title="1 : 24 : Ax T2 FLAIR", control_type="ListItem")
| | |
| | | ListItem - '2 : 48 : Ax DWI 1000b' (L126, T222, R219, B322)
| | | ['2 : 48 : Ax DWI 1000bListItem', 'ListItem2', '2 : 48 : Ax DWI 1000b']
| | | child_window(title="2 : 48 : Ax DWI 1000b", control_type="ListItem")
| | |
| | | ListItem - '3 : 125 : 3DTOF 2 slab MRA' (L227, T222, R324, B322)
| | | ['3 : 125 : 3DTOF 2 slab MRA', '3 : 125 : 3DTOF 2 slab MRAListItem', 'ListItem3']
| | | child_window(title="3 : 125 : 3DTOF 2 slab MRA", control_type="ListItem")
| | |
| | | ListItem - '4 : 24 : Ax FRFSE T2' (L340, T222, R416, B322)
| | | ['4 : 24 : Ax FRFSE T2', 'ListItem4', '4 : 24 : Ax FRFSE T2ListItem']
| | | child_window(title="4 : 24 : Ax FRFSE T2", control_type="ListItem")
| | |
| | | ListItem - '5 : 196 : COR 3D FSPGR IrP' (L430, T222, R532, B322)
| | | ['5 : 196 : COR 3D FSPGR IrP', 'ListItem5', '5 : 196 : COR 3D FSPGR IrPListItem']
| | | child_window(title="5 : 196 : COR 3D FSPGR IrP", control_type="ListItem")
| | |
| | | ListItem - '6 : 19 : SAG FRFSE T2' (L546, T222, R622, B322)
| | | ['6 : 19 : SAG FRFSE T2', 'ListItem6', '6 : 19 : SAG FRFSE T2ListItem']
| | | child_window(title="6 : 19 : SAG FRFSE T2", control_type="ListItem")
| | |
| | | ListItem - '7 : 24 : Ax T1 FC' (L636, T222, R739, B306)
| | | ['7 : 24 : Ax T1 FC', 'ListItem7', '7 : 24 : Ax T1 FCListItem']
| | | child_window(title="7 : 24 : Ax T1 FC", control_type="ListItem")
| | |
| | | ListItem - '8 : 24 : Apparent Diffusion Coefficient (mm?/s)' (L739, T222, R842, B322)
| | | ['8 : 24 : Apparent Diffusion Coefficient (mm?/s)ListItem', 'ListItem8', '8 : 24 : Apparent Diffusion Coefficient (mm?/s)']
| | | child_window(title="8 : 24 : Apparent Diffusion Coefficient (mm?/s)", control_type="ListItem")
| | |
| | | ListItem - '9 : 20 : TOF-RL' (L846, T222, R940, B306)
| | | ['9 : 20 : TOF-RL', '9 : 20 : TOF-RLListItem', 'ListItem9']
| | | child_window(title="9 : 20 : TOF-RL", control_type="ListItem")
| |
| | Pane - '' (L1624, T180, R1656, B212)
| | ['Pane', '004552Pane', 'Pane0', 'Pane1', '004552Pane0', '004552Pane1']
| | child_window(auto_id="100", control_type="Pane")
| |
| | Pane - '' (L1664, T180, R1696, B212)
| | ['Pane2', '004552Pane2']
| | child_window(auto_id="105", control_type="Pane")
| |
| | Pane - '' (L1704, T180, R1736, B212)
| | ['Pane3', '004552Pane3']
| | child_window(auto_id="104", control_type="Pane")
| |
| | Pane - '' (L1744, T180, R1776, B212)
| | ['Pane4', '004552Pane4']
| | child_window(auto_id="101", control_type="Pane")
| |
| | Pane - '' (L1784, T180, R1816, B212)
| | ['Pane5', '004552Pane5']
| | child_window(auto_id="106", control_type="Pane")
| |
| | Pane - '' (L1824, T180, R1856, B212)
| | ['Pane6', '004552Pane6']
| | child_window(auto_id="102", control_type="Pane")
| |
| | Pane - '' (L1864, T180, R1896, B212)
| | ['Pane7', '004552Pane7']
| | child_window(auto_id="103", control_type="Pane")
| |
| | TitleBar - '' (L16, T157, R1904, B180)
| | ['TitleBar', '2016/1/23 下午 03:00:19TitleBar', 'TitleBar0', 'TitleBar1']
|
| Pane - '' (L8, T69, R1912, B946)
| ['蕭輔仁Pane', 'Pane8', '蕭輔仁Pane0', '蕭輔仁Pane1']
| child_window(auto_id="displayPanel", control_type="Pane")
| |
| | Pane - '' (L8, T69, R1912, B946)
| | ['蕭輔仁Pane2', 'Pane9']
| | child_window(auto_id="DisplayView", control_type="Pane")
| | |
| | | Pane - 'STUDY_NAV_TOOLBAR0' (L8, T69, R979, B109)
| | | ['STUDY_NAV_TOOLBAR0', 'STUDY_NAV_TOOLBAR0Pane', 'Pane10']
| | | child_window(title="STUDY_NAV_TOOLBAR0", auto_id="STUDY_NAV_TOOLBAR0", control_type="Pane")
| | | |
| | | | Pane - '' (L442, T73, R474, B105)
| | | | ['Pane11', '3009684Pane']
| | | | child_window(auto_id="syncTextArea", control_type="Pane")
| | | |
| | | | Static - '3009684' (L393, T71, R442, B90)
| | | | ['3009684', 'Static', '3009684Static', 'Static0', 'Static1']
| | | | child_window(title="3009684", auto_id="patientIdLabel", control_type="Text")
| | | |
| | | | Static - '蕭輔仁' (L207, T71, R250, B90)
| | | | ['蕭輔仁', 'Static2', '蕭輔仁Static']
| | | | child_window(title="蕭輔仁", auto_id="patientNameLabel", control_type="Text")
| | | |
| | | | Static - '2016/1/23 下午 03:00:19' (L207, T90, R347, B109)
| | | | ['2016/1/23 下午 03:00:19', 'Static3', '2016/1/23 下午 03:00:19Static']
| | | | child_window(title="2016/1/23 下午 03:00:19", auto_id="studyDateTimeLabel", control_type="Text")
| | | |
| | | | Static - 'T0159343504' (L367, T90, R442, B109)
| | | | ['T01593435042', 'Static4', 'T0159343504Static']
| | | | child_window(title="T0159343504", auto_id="accessionNumberLabel", control_type="Text")
| | | |
| | | | Static - '第 1 個,共 2 個' (L839, T79, R934, B98)
| | | | ['第 1 個,共 2 個', '第 1 個,共 2 個Static', 'Static5', '第 1 個,共 2 個0', '第 1 個,共 2 個1']
| | | | child_window(title="第 1 個,共 2 個", auto_id="studyPositionLabel", control_type="Text")
| | | |
| | | | Button - '' (L139, T73, R203, B105)
| | | | ['Button5']
| | | | child_window(auto_id="137986", control_type="Button")
| | | |
| | | | Button - '' (L944, T73, R976, B105)
| | | | ['第 1 個,共 2 個Button', 'Button6', '第 1 個,共 2 個Button0', '第 1 個,共 2 個Button1']
| | | | child_window(auto_id="137988", control_type="Button")
| | | |
| | | | Button - '' (L802, T73, R834, B105)
| | | | ['3009684Button', 'Button7']
| | | | child_window(auto_id="138012", control_type="Button")
| | | |
| | | | Button - '第 1 個,共 2 個' (L766, T73, R798, B105)
| | | | ['第 1 個,共 2 個2', '第 1 個,共 2 個Button2', 'Button8']
| | | | child_window(title="第 1 個,共 2 個", auto_id="worklistDialogButton", control_type="Button")
| | | |
| | | | Button - '文字' (L11, T73, R135, B106)
| | | | ['文字Button', '文字', 'Button9']
| | | | child_window(title="文字", auto_id="ShowTextArea", control_type="Button")
| | |
| | | Pane - 'TOP_TOOLBAR0' (L979, T69, R1912, B149)
| | | ['Pane12', 'TOP_TOOLBAR0', 'TOP_TOOLBAR0Pane']
| | | child_window(title="TOP_TOOLBAR0", auto_id="TOP_TOOLBAR0", control_type="Pane")
| | | |
| | | | Button - '' (L1015, T105, R1047, B137)
| | | | ['第 1 個,共 2 個Button3', 'Button10']
| | | | child_window(auto_id="1513250", control_type="Button")
| | | |
| | | | Button - '' (L1015, T73, R1047, B105)
| | | | ['第 1 個,共 2 個Button4', 'Button11']
| | | | child_window(auto_id="1971742", control_type="Button")
| | | |
| | | | Button - '' (L1047, T73, R1079, B105)
| | | | ['第 1 個,共 2 個Button5', 'Button12']
| | | | child_window(auto_id="858352", control_type="Button")
| | | |
| | | | Button - '' (L1079, T73, R1111, B105)
| | | | ['第 1 個,共 2 個Button6', 'Button13']
| | | | child_window(auto_id="1185490", control_type="Button")
| | | |
| | | | Button - '' (L983, T73, R1015, B105)
| | | | ['第 1 個,共 2 個Button7', 'Button14']
| | | | child_window(auto_id="1447662", control_type="Button")
| | | |
| | | | Button - '' (L1143, T73, R1175, B105)
| | | | ['第 1 個,共 2 個Button8', 'Button15']
| | | | child_window(auto_id="530832", control_type="Button")
| | | |
| | | | Button - '' (L1079, T105, R1111, B137)
| | | | ['第 1 個,共 2 個Button9', 'Button16']
| | | | child_window(auto_id="988170", control_type="Button")
| | | |
| | | | Button - '' (L983, T105, R1015, B137)
| | | | ['第 1 個,共 2 個Button10', 'Button17']
| | | | child_window(auto_id="988388", control_type="Button")
| | | |
| | | | Button - '' (L1111, T105, R1143, B137)
| | | | ['第 1 個,共 2 個Button11', 'Button18']
| | | | child_window(auto_id="4133742", control_type="Button")
| | | |
| | | | Button - '' (L1111, T73, R1143, B105)
| | | | ['第 1 個,共 2 個Button12', 'Button19']
| | | | child_window(auto_id="923572", control_type="Button")
| | | |
| | | | Button - '' (L1047, T105, R1079, B137)
| | | | ['第 1 個,共 2 個Button13', 'Button20']
| | | | child_window(auto_id="989280", control_type="Button")
| | |
| | | Pane - 'STANDARD_TOOLBAR0' (L8, T109, R979, B149)
| | | ['Pane13', 'STANDARD_TOOLBAR0Pane', 'STANDARD_TOOLBAR0']
| | | child_window(title="STANDARD_TOOLBAR0", auto_id="STANDARD_TOOLBAR0", control_type="Pane")
| | | |
| | | | Button - '' (L12, T113, R44, B145)
| | | | ['Button21']
| | | | child_window(auto_id="2627966", control_type="Button")
| | | |
| | | | Button - '' (L244, T113, R276, B145)
| | | | ['Button22', '2016/1/23 下午 03:00:19Button', '2016/1/23 下午 03:00:19Button0', '2016/1/23 下午 03:00:19Button1']
| | | | child_window(auto_id="2822998", control_type="Button")
| | | |
| | | | Button - '' (L76, T113, R108, B145)
| | | | ['Button23']
| | | | child_window(auto_id="138014", control_type="Button")
| | | |
| | | | Button - '' (L44, T113, R76, B145)
| | | | ['Button24']
| | | | child_window(auto_id="138016", control_type="Button")
| | | |
| | | | Button - '' (L156, T113, R188, B145)
| | | | ['Button25']
| | | | child_window(auto_id="138010", control_type="Button")
| | | |
| | | | Button - '' (L108, T113, R140, B145)
| | | | ['Button26']
| | | | child_window(auto_id="138008", control_type="Button")
| | | |
| | | | Button - '' (L196, T113, R244, B145)
| | | | ['Button27', '2016/1/23 下午 03:00:19Button2']
| | | | child_window(auto_id="138000", control_type="Button")
| | | |
| | | | Button - '' (L284, T113, R316, B145)
| | | | ['蕭輔仁Button', 'Button28']
| | | | child_window(auto_id="1578746", control_type="Button")
| | | |
| | | | Button - '' (L332, T113, R380, B145)
| | | | ['Button29', '2016/1/23 下午 03:00:19Button3']
| | | | child_window(auto_id="138018", control_type="Button")
| | | |
| | | | Button - '' (L380, T113, R412, B145)
| | | | ['T0159343504Button', 'Button30', 'T0159343504Button0', 'T0159343504Button1']
| | | | child_window(auto_id="WINDOW_LEVEL_TOOLS", control_type="Button")
| | | |
| | | | Button - '' (L428, T113, R476, B145)
| | | | ['T0159343504Button2', 'Button31']
| | | | child_window(auto_id="138004", control_type="Button")
| | | |
| | | | Button - '' (L476, T113, R508, B145)
| | | | ['T0159343504Button3', 'Button32']
| | | | child_window(auto_id="MAGNIFY_TOOLS", control_type="Button")
| | | |
| | | | Button - '' (L524, T113, R572, B145)
| | | | ['T0159343504Button4', 'Button33']
| | | | child_window(auto_id="138006", control_type="Button")
| | | |
| | | | Button - '' (L572, T113, R604, B145)
| | | | ['T0159343504Button5', 'Button34']
| | | | child_window(auto_id="GEOMETRY_TOOLS", control_type="Button")
| | | |
| | | | Button - '' (L620, T113, R668, B145)
| | | | ['T0159343504Button6', 'Button35']
| | | | child_window(auto_id="138002", control_type="Button")
| | | |
| | | | Button - '' (L668, T113, R700, B145)
| | | | ['T0159343504Button7', 'Button36']
| | | | child_window(auto_id="MARKUP_TOOLS", control_type="Button")
| | |
| | | Pane - 'STATUS_BAR0' (L1448, T916, R1912, B946)
| | | ['Pane14', 'STATUS_BAR0', 'STATUS_BAR0Pane']
| | | child_window(title="STATUS_BAR0", auto_id="STATUS_BAR0", control_type="Pane")
| | | |
| | | | Pane - '' (L1448, T916, R1912, B946)
| | | | ['CGEMR1C1Pane', 'Pane15']
| | | | child_window(auto_id="StatusBarView", control_type="Pane")
| | | | |
| | | | | Static - 'DICOM S: 6 I: 1' (L1450, T932, R1596, B946)
| | | | | ['DICOM S: 6 I: 1', 'Static6', 'DICOM S: 6 I: 1Static']
| | | | | child_window(title="DICOM S: 6 I: 1", auto_id="DicomImageNumbers", control_type="Text")
| | | | |
| | | | | Static - '' (L1611, T916, R1679, B930)
| | | | | ['Static7', '0.54Static', '0.54Static0', '0.54Static1']
| | | | | child_window(auto_id="PixelValue", control_type="Text")
| | | | |
| | | | | Static - '0.54' (L1554, T916, R1610, B930)
| | | | | ['0.54', 'Static8', '0.54Static2']
| | | | | child_window(title="0.54", auto_id="Magnification", control_type="Text")
| | | | |
| | | | | Static - 'CGEMR1C1' (L1463, T916, R1553, B930)
| | | | | ['Static9', 'CGEMR1C1Static', 'CGEMR1C1']
| | | | | child_window(title="CGEMR1C1", auto_id="Calibration", control_type="Text")
| | | | |
| | | | | Static - 'Ax FRFSE T2' (L1680, T916, R1911, B930)
| | | | | ['Ax FRFSE T2Static', 'Static10', 'Ax FRFSE T2']
| | | | | child_window(title="Ax FRFSE T2", auto_id="SeriesDescription", control_type="Text")
| | | | |
| | | | | Static - 'S: 4/9 I: 1/24' (L1611, T932, R1911, B946)
| | | | | ['S: 4/9 I: 1/24', 'Static11', 'S: 4/9 I: 1/24Static']
| | | | | child_window(title="S: 4/9 I: 1/24", auto_id="ClientImageNumbers", control_type="Text")
| | |
| | | Pane - 'CYCLE_CONTROL' (L8, T916, R1448, B946)
| | | ['CYCLE_CONTROL', 'CYCLE_CONTROLPane', 'Pane16']
| | | child_window(title="CYCLE_CONTROL", auto_id="CYCLE_CONTROL", control_type="Pane")
| | | |
| | | | Pane - '' (L8, T916, R1448, B946)
| | | | ['N122119493Pane', 'Pane17', 'N122119493Pane0', 'N122119493Pane1']
| | | | child_window(auto_id="CycleView", control_type="Pane")
| | | | |
| | | | | Pane - '' (L8, T916, R1448, B946)
| | | | | ['N122119493Pane2', 'Pane18']
| | | | | child_window(auto_id="setAsideContainer", control_type="Pane")
| | | | | |
| | | | | | Pane - '' (L8, T916, R114, B946)
| | | | | | ['N122119493Pane3', 'Pane19']
| | | | | | child_window(auto_id="OpenItemPanel", control_type="Pane")
| | | | | | |
| | | | | | | Static - 'N122119493' (L13, T921, R93, B940)
| | | | | | | ['N122119493', 'Static12', 'N122119493Static', 'N1221194930', 'N1221194931']
| | | | | | | child_window(title="N122119493", auto_id="identificationOneLabel", control_type="Text")
| | | | | | |
| | | | | | | Button - 'N122119493' (L94, T923, R110, B938)
| | | | | | | ['N1221194932', 'Button37', 'N122119493Button']
| | | | | | | child_window(title="N122119493", auto_id="15864150", control_type="Button")
| | |
| | | Pane - 'STUDY_IMAGE_VIEWER' (L8, T149, R1912, B916)
| | | ['STUDY_IMAGE_VIEWERPane', 'Pane20', 'STUDY_IMAGE_VIEWER']
| | | child_window(title="STUDY_IMAGE_VIEWER", auto_id="STUDY_IMAGE_VIEWER", control_type="Pane")
| | | |
| | | | Pane - '' (L8, T149, R1912, B916)
| | | | ['Pane21', '2016/1/23 下午 03:00:19Pane']
| | | | child_window(auto_id="StudyImageViewer", control_type="Pane")
| | | | |
| | | | | Pane - 'Screen_0_format_21_Position_0_0' (L8, T354, R958, B633)
| | | | | ['Pane22', 'Screen_0_format_21_Position_0_0', 'Screen_0_format_21_Position_0_0Pane']
| | | | | child_window(title="Screen_0_format_21_Position_0_0", auto_id="26", control_type="Pane")
| | | | |
| | | | | Pane - 'Screen_0_format_21_Position_0_1' (L958, T354, R1908, B633)
| | | | | ['Screen_0_format_21_Position_0_1Pane', 'Screen_0_format_21_Position_0_1', 'Pane23']
| | | | | child_window(title="Screen_0_format_21_Position_0_1", auto_id="27", control_type="Pane")
| | | | |
| | | | | Pane - 'Screen_0_format_21_Position_1_0' (L8, T633, R958, B912)
| | | | | ['Screen_0_format_21_Position_1_0', 'Pane24', 'Screen_0_format_21_Position_1_0Pane']
| | | | | child_window(title="Screen_0_format_21_Position_1_0", auto_id="28", control_type="Pane")
| | | | |
| | | | | Pane - 'Screen_0_format_21_Position_1_1' (L958, T633, R1908, B912)
| | | | | ['Screen_0_format_21_Position_1_1', 'Pane25', 'Screen_0_format_21_Position_1_1Pane']
| | | | | child_window(title="Screen_0_format_21_Position_1_1", auto_id="29", control_type="Pane")
| | |
| | | Static - '正在擷取檢查...' (L849, T487, R1072, B527)
| | | ['正在擷取檢查...', 'Static13', '正在擷取檢查...Static']
| | | child_window(title="正在擷取檢查...", auto_id="noImagesAvailableLabel", control_type="Text")
|
| Pane - '' (L8, T31, R1912, B946)
| ['蕭輔仁Pane3', 'Pane26']
| child_window(auto_id="xPanderListContainer", control_type="Pane")
| |
| | Pane - '' (L8, T31, R1912, B69)
| | ['Pane27']
| | child_window(auto_id="listXPander", control_type="Pane")
| | |
| | | Static - '004552' (L1637, T37, R1691, B62)
| | | ['004552', 'Static14', '004552Static', '0045520', '0045521']
| | | child_window(title="004552", auto_id="userIDLabel", control_type="Text")
| | |
| | | Button - '登出' (L1861, T35, R1907, B64)
| | | ['Button38', '登出Button', '登出']
| | | child_window(title="登出", auto_id="logoutButton", control_type="Button")
| | |
| | | Button - '004552' (L1817, T38, R1841, B62)
| | | ['0045522', 'Button39', '004552Button', '004552Button0', '004552Button1']
| | | child_window(title="004552", auto_id="helpActionButton", control_type="Button")
| | |
| | | Button - '求助說明' (L1737, T35, R1815, B64)
| | | ['求助說明', '求助說明Button', 'Button40']
| | | child_window(title="求助說明", auto_id="helpButton", control_type="Button")
| | |
| | | Button - '' (L1693, T38, R1717, B62)
| | | ['004552Button2', 'Button41']
| | | child_window(auto_id="userActionButton", control_type="Button")
|
| TitleBar - '' (L24, T3, R1912, B31)
| ['TitleBar2']
| |
| | Menu - '系統' (L8, T8, R30, B30)
| | ['Menu', '系統', '系統Menu', '系統0', '系統1']
| | child_window(title="系統", auto_id="MenuBar", control_type="MenuBar")
| | |
| | | MenuItem - '系統' (L8, T8, R30, B30)
| | | ['系統MenuItem', '系統2', 'MenuItem']
| | | child_window(title="系統", control_type="MenuItem")
| |
| | Button - '最小化' (L1773, T1, R1820, B31)
| | ['最小化Button', '最小化', 'Button42']
| | child_window(title="最小化", control_type="Button")
| |
| | Button - '最大化' (L1820, T1, R1866, B31)
| | ['最大化Button', 'Button43', '最大化']
| | child_window(title="最大化", control_type="Button")
| |
| | Button - '關閉' (L1866, T1, R1913, B31)
| | ['關閉Button', '關閉', 'Button44']
| | child_window(title="關閉", control_type="Button")

88
IMPAX/models.py Normal file
View file

@ -0,0 +1,88 @@
import datetime
from sqlalchemy import Boolean, Column, Date, DateTime, ForeignKey, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from slugify import slugify
#engine = create_engine('sqlite:///:memory:', echo=True)
# Local SQLite cache of everything scraped from IMPAX.
engine = create_engine('sqlite:///impax.db', echo=False)
#engine = create_engine('sqlite:////dev/shm/test.db', echo=True)
Base = declarative_base()
# The string below records the column keys seen in the worklist grid dump
# (fed into the models that follow).
'''
['Group', 'View',
'patient.patient_id', 'patient.patient_name', 'patient.patient_name_ph_utf8',
'study.accession_number', 'study.date_time_created', 'study.modality', 'study.num_images', 'study.status', 'study.study_date', 'study.study_description', 'study.study_time']
'''
class Patient(Base):
    """One PACS patient; parent of their Study rows."""
    __tablename__ = 'patient'
    patient_id = Column(String, primary_key=True)  # hospital chart number
    patient_name = Column(String)
    patient_name_ph_utf8 = Column(String)  # phonetic patient name (UTF-8)
    counter = Column(Integer)  # incremented per crawl by getPatient()
    created = Column(DateTime, default=datetime.datetime.now)
    saved = Column(DateTime, onupdate=datetime.datetime.now)
    studies = relationship("Study")  # one-to-many via Study.patient_id
class Study(Base):
    """ORM row for the ``study`` table, keyed by accession number."""
    __tablename__ = 'study'

    accession_number = Column(String, primary_key=True)
    date_time_created = Column(String)
    modality = Column(String)
    num_images = Column(Integer)
    status = Column(String)
    study_date = Column(Date)
    study_description = Column(String)
    study_time = Column(String)
    report = Column(String)
    run = Column(String)       # batch tag; move.py selects studies by this value
    success = Column(Boolean)
    patient_id = Column(String, ForeignKey('patient.patient_id'))
    created = Column(DateTime, default=datetime.datetime.now)  # set on insert
    saved = Column(DateTime, onupdate=datetime.datetime.now)   # refreshed on update

    def naming(self):
        """Directory name for this study: ``<study_date>_<modality>_<accession_number>``."""
        pieces = (self.study_date, self.modality, self.accession_number)
        return '_'.join(str(piece) for piece in pieces)
class Series(Base):
    """ORM row for the ``series`` table, keyed by series UID."""
    __tablename__ = 'series'

    series_uid = Column(String, primary_key=True)
    name = Column(String)
    document0 = Column(String)
    document1 = Column(String)
    accession_number = Column(String, ForeignKey('study.accession_number'))
    created = Column(DateTime, default=datetime.datetime.now)  # set on insert
    saved = Column(DateTime, onupdate=datetime.datetime.now)   # refreshed on update

    def slugify(self):
        """Return a slug of the series name.

        NOTE: inside this method the bare name ``slugify`` resolves to the
        module-level function imported from the ``slugify`` package, not to
        this method (class attributes are not in scope in method bodies).
        """
        return slugify(self.name)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

40
IMPAX/move.py Normal file
View file

@ -0,0 +1,40 @@
from pathlib import Path
import os
import shutil
from models import *
SrcRoot = Path('/media/nfs/SRS/storage/0/')
ShownRoot = Path('/media/nfs/SRS/export/shown/')
HiddenRoot = Path('/media/nfs/SRS/export/hidden/')
session = Session()
for entry in os.scandir(SrcRoot):
    if not entry.is_dir():
        continue
    # Each run directory: move every recorded study's shown/hidden subtrees
    # into the export roots, then delete the whole run directory.
    for study in session.query(Study).filter_by(run=entry.name):
        print(entry.name, study.naming())
        src_base = Path(entry.path) / study.patient_id / study.naming()
        rel = f'{study.patient_id}/{study.naming()}'
        for src_dir, dst_dir in ((src_base / 'shown', ShownRoot / rel),
                                 (src_base / 'hidden', HiddenRoot / rel)):
            print(src_dir, dst_dir)
            if dst_dir.is_dir():
                # Replace any previous export of this study.
                shutil.rmtree(dst_dir)
            shutil.move(src_dir, dst_dir)
    shutil.rmtree(entry.path)

157
IMPAX/nb1.ipynb Normal file

File diff suppressed because one or more lines are too long

297
IMPAX/nb2.ipynb Normal file

File diff suppressed because one or more lines are too long

113
IMPAX/nn/models.py Normal file
View file

@ -0,0 +1,113 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
OUT_CHANNELS = 32

class N902(nn.Module):
    """Two-layer fully-convolutional net, 1 input channel -> 2 output channels.

    Historical loss notes per OUT_CHANNELS: 32 -> 144.878, 64 -> 135.952,
    128 -> 128.388.
    """
    def __init__(self):
        # BUG FIX: was ``super(N90, self).__init__()`` — N90 is a different,
        # unrelated class, so constructing an N902 raised
        # ``TypeError: super(type, obj): obj must be an instance or subtype of type``.
        super().__init__()
        # 3x3 convolutions with padding=1 preserve the spatial size.
        self.conv1 = nn.Conv2d(1, OUT_CHANNELS, 3, padding=1)
        self.conv2 = nn.Conv2d(OUT_CHANNELS, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); no activation on the final layer."""
        x = F.relu(self.conv1(x))
        return self.conv2(x)
class N903(nn.Module):
    """Three-layer fully-convolutional net, 1 input channel -> 2 output channels.

    Historical loss notes per OUT_CHANNELS: 32 -> 79.591, 64 -> 69.663.
    """
    def __init__(self):
        # BUG FIX: was ``super(N90, self).__init__()`` — wrong class in super(),
        # which raises TypeError when an N903 instance is constructed.
        super().__init__()
        # 3x3 convolutions with padding=1 preserve the spatial size.
        self.conv1 = nn.Conv2d(1, OUT_CHANNELS, 3, padding=1)
        self.conv2 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv3 = nn.Conv2d(OUT_CHANNELS, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); ReLU between layers, linear output."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        return self.conv3(x)
class N904(nn.Module):
    """Four-layer fully-convolutional net, 1 input channel -> 2 output channels.

    Historical loss notes per OUT_CHANNELS: 32 -> 65.503, 64 -> 55.369.
    """
    def __init__(self):
        # BUG FIX: was ``super(N90, self).__init__()`` — wrong class in super(),
        # which raises TypeError when an N904 instance is constructed.
        super().__init__()
        # 3x3 convolutions with padding=1 preserve the spatial size.
        self.conv1 = nn.Conv2d(1, OUT_CHANNELS, 3, padding=1)
        self.conv2 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv3 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv4 = nn.Conv2d(OUT_CHANNELS, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); ReLU between layers, linear output."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        return self.conv4(x)
class N90(nn.Module):
    """Five-layer fully-convolutional net, 1 input channel -> 2 output channels.

    Historical loss note per OUT_CHANNELS: 32 -> 48.523.
    """
    def __init__(self):
        super().__init__()
        # 3x3 convolutions with padding=1 preserve the spatial size.
        self.conv1 = nn.Conv2d(1, OUT_CHANNELS, 3, padding=1)
        self.conv2 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv3 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv4 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv5 = nn.Conv2d(OUT_CHANNELS, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); ReLU between layers, linear output."""
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.relu(layer(x))
        return self.conv5(x)
class N906(nn.Module):
    """Six-layer fully-convolutional net, 1 input channel -> 2 output channels.

    Historical loss note per OUT_CHANNELS: 32 -> 43.330.
    """
    def __init__(self):
        # BUG FIX: was ``super(N90, self).__init__()`` — wrong class in super(),
        # which raises TypeError when an N906 instance is constructed.
        super().__init__()
        # 3x3 convolutions with padding=1 preserve the spatial size.
        self.conv1 = nn.Conv2d(1, OUT_CHANNELS, 3, padding=1)
        self.conv2 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv3 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv4 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv5 = nn.Conv2d(OUT_CHANNELS, OUT_CHANNELS, 3, padding=1)
        self.conv6 = nn.Conv2d(OUT_CHANNELS, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); ReLU between layers, linear output."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        return self.conv6(x)
# net = N90_100()
# print(net)

View file

@ -0,0 +1,21 @@
import os
from PIL import Image, ImageFilter, ImageMath
import numpy as np
from sklearn.feature_extraction import image
STUDY_PATH = "/media/cifs/shares/SRS/storage/tmp/MRI With_Without Contrast--Brain_53820330"
MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'
for jpg_file in sorted(os.listdir(STUDY_PATH)):
    # Debug run: inspect only the first image, then bail out via exit().
    jpg_path = os.path.join(STUDY_PATH, jpg_file)
    print(jpg_path)
    grey = Image.open(jpg_path).convert('L')
    print(grey)
    one_image = np.array(grey)
    print(one_image)
    # 2x2 sliding-window patches of the greyscale image.
    patches = image.extract_patches_2d(one_image, (2, 2))
    exit()

22
IMPAX/nni/config.yml Normal file
View file

@ -0,0 +1,22 @@
authorName: default
experimentName: example_impax_pytorch
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 100
#choice: local, remote, pai
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
classArgs:
#choice: maximize, minimize
# optimize_mode: maximize
optimize_mode: minimize
trial:
command: python3 main.py
codeDir: .
gpuNum: 1

199
IMPAX/nni/dataset.py Normal file
View file

@ -0,0 +1,199 @@
import os
import random
from PIL import Image, ImageFilter, ImageMath
from scipy import ndimage
import numpy as np
import torch
PATCH_SIZE = 256
# JPGDIR = '/media/nfs/SRS/IMPAX/'
# JPGDIR = '/shares/Public/IMPAX/'
def img_frombytes(data):
    """Pack a boolean/0-1 2-D array into a 1-bit ('1' mode) PIL image."""
    width_height = data.shape[::-1]  # PIL size is (width, height)
    packed = np.packbits(data, axis=1)
    return Image.frombytes(mode='1', size=width_height, data=packed)
def getpatch(width, height, patch_size=None):
    """Pick a random grid-aligned top-left corner (w, h) for a square patch.

    The corner is snapped down to a multiple of ``patch_size`` and clamped so
    the patch fits inside a ``width`` x ``height`` image.  ``patch_size``
    defaults to the module-level PATCH_SIZE (generalized from the previously
    hard-coded constant; existing callers are unaffected).
    """
    if patch_size is None:
        patch_size = PATCH_SIZE
    w = random.randint(0, width - 1) // patch_size * patch_size
    if w > width - patch_size:
        w = width - patch_size
    h = random.randint(0, height - 1) // patch_size * patch_size
    if h > height - patch_size:
        h = height - patch_size
    return w, h
class IMPAXDataset(object):
    """Dataset of IMPAX screenshot patch pairs.

    For every ``*_100`` study directory under ``JPGDIR/<pid>/`` it records the
    paths of the corresponding ``_90``, ``_AN`` and generated ``_TXT`` images.
    When a ``_TXT`` mask does not exist yet it is derived from the ``_100``
    image (pixels whose grey value is 0xCC +/- 1 — presumably the text
    overlay colour, TODO confirm) and saved as a 1-bit PNG.
    """

    def __init__(self, JPGDIR):
        self.ST_90 = []
        self.ST_100 = []
        self.ST_AN = []
        self.ST_TXT = []
        self.MAXSHAPE = None      # (width, height) of the largest image seen
        self.MAXSIZE = 0
        self.MINSHAPE = None      # (width, height) of the smallest image seen
        self.MINSIZE = 9999 * 9999
        self.gets = 0
        for pid in sorted(os.listdir(JPGDIR)):
            PATDIR = os.path.join(JPGDIR, pid)
            for study in sorted(os.listdir(PATDIR)):
                if not study.endswith('_100'):
                    continue
                ST100_DIR = os.path.join(PATDIR, study)
                TXT_DIR = ST100_DIR.replace('_100', '_TXT')
                os.makedirs(TXT_DIR, exist_ok=True)
                for jpg in sorted(os.listdir(ST100_DIR)):
                    jpg_path = os.path.join(ST100_DIR, jpg)
                    txt_path = jpg_path.replace('_100', '_TXT').replace('.jpg', '.png')
                    self.ST_100.append(jpg_path)
                    self.ST_90.append(jpg_path.replace('_100', '_90'))
                    self.ST_AN.append(jpg_path.replace('_100', '_AN'))
                    self.ST_TXT.append(txt_path)
                    if os.path.isfile(txt_path):
                        # Mask already generated on a previous run.
                        continue
                    img = Image.open(jpg_path).convert('L')
                    width, height = img.size
                    size = width * height
                    if self.MAXSIZE < size:
                        self.MAXSIZE = size
                        self.MAXSHAPE = width, height
                    if self.MINSIZE > size:
                        self.MINSIZE = size
                        self.MINSHAPE = width, height
                    # (removed a second ``os.path.isfile(txt_path)`` check here:
                    # the one above already skipped existing masks, so it was
                    # unreachable)
                    jpg_ndarray = np.array(img)
                    # Candidate text pixels: grey value 0xCC +/- 1; C0 catches
                    # near-black pixels.
                    CC = np.logical_and(jpg_ndarray >= 0xCB, jpg_ndarray <= 0xCD)
                    C0 = (jpg_ndarray <= 0x01)
                    MASK = np.logical_or(CC, C0)
                    # Shift the mask up-left by one: a pixel is kept only if its
                    # down-right neighbour also matched (suppresses isolated hits).
                    MASK = np.roll(MASK, -1, 0)
                    MASK = np.roll(MASK, -1, 1)
                    MASKED = np.logical_and(CC, MASK).astype('uint8')
                    # Keep a pixel only if at least 2 of its 3x3 neighbours are set.
                    FILTERD = ndimage.rank_filter(MASKED, rank=-2, size=3)
                    FILTERD = np.minimum(MASKED, FILTERD)
                    img_frombytes(FILTERD).save(txt_path)
        if self.MINSHAPE:
            print(self.MINSHAPE)
        if self.MAXSHAPE:
            print(self.MAXSHAPE)

    def __getitem__(self, idx):
        """Return one random PATCH_SIZE x PATCH_SIZE training pair.

        Input:  float tensor of shape (1, P, P) cut from the ``_90`` image.
        Target: float tensor of shape (2, P, P) stacking the ``_AN`` image
                and the text mask at the same location.

        (Removed a large chunk of unreachable tutorial code — the PennFudan
        mask/box example — that followed the return statement.)
        """
        st_90 = Image.open(self.ST_90[idx]).convert('L')
        st_AN = Image.open(self.ST_AN[idx]).convert('L')
        st_TX = Image.open(self.ST_TXT[idx]).convert('L')
        width, height = st_90.size
        w, h = getpatch(width, height)
        s2_90 = np.array(st_90)[np.newaxis, h:h + PATCH_SIZE, w:w + PATCH_SIZE]
        s2_AN = np.array(st_AN)[h:h + PATCH_SIZE, w:w + PATCH_SIZE]
        s2_TX = np.array(st_TX)[h:h + PATCH_SIZE, w:w + PATCH_SIZE]
        s2_AN_TX = np.stack((s2_AN, s2_TX))
        return torch.from_numpy(s2_90).float(), torch.from_numpy(s2_AN_TX).float()

    def __len__(self):
        return len(self.ST_100)

246
IMPAX/nni/main.py Normal file
View file

@ -0,0 +1,246 @@
"""
A deep MNIST classifier using convolutional layers.
This file is a modification of the official pytorch mnist example:
https://github.com/pytorch/examples/blob/master/mnist/main.py
"""
import os
import argparse
import logging
import nni
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from dataset import *
from models import *
logger = logging.getLogger('IMPAX_AutoML')
criterion = nn.MSELoss()
# criterion = nn.MSELoss(reduction='sum')
# criterion = nn.MSELoss
# MODEL_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = "/home/xfr/nni/"
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch; log per-batch loss every ``log_interval`` batches
    and the dataset-weighted average loss at the end.
    """
    model.train()
    train_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        # BUG FIX: accumulate a detached Python float, not the loss tensor.
        # ``train_loss += loss * len(data)`` kept every batch's autograd graph
        # alive for the whole epoch, steadily growing memory use.
        train_loss += loss.item() * len(data)
        loss.backward()
        optimizer.step()
        if batch_idx % args['log_interval'] == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    logger.info('Train Epoch {}:\tAverage Loss: {:.6f}'.format(
        epoch,
        train_loss / len(train_loader.dataset),
    ))
def test(args, model, device, test_loader):
    """Evaluate ``model`` on ``test_loader`` and return the average loss."""
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum of per-sample losses; normalized after the loop.
            test_loss += criterion(output, target).item() * len(data)
            # (removed the unused ``pred = output.argmax(...)`` line and the
            # dead ``correct`` counter — this is a regression task, and the
            # accuracy computation was already commented out)
    test_loss /= len(test_loader.dataset)
    logger.info('Test set: Average loss: {:.4f}, {}'.format(
        test_loss,
        len(test_loader.dataset),
    ))
    return test_loss
# def main(args):
# use_cuda = not args['no_cuda'] and torch.cuda.is_available()
# torch.manual_seed(args['seed'])
# device = torch.device("cuda" if use_cuda else "cpu")
# kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# data_dir = os.path.join(args['data_dir'], nni.get_trial_id())
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(data_dir, train=True, download=True,
# transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=args['batch_size'], shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=1000, shuffle=True, **kwargs)
# hidden_size = args['hidden_size']
# model = Net(hidden_size=hidden_size).to(device)
# optimizer = optim.SGD(model.parameters(), lr=args['lr'],
# momentum=args['momentum'])
# for epoch in range(1, args['epochs'] + 1):
# train(args, model, device, train_loader, optimizer, epoch)
# test_acc = test(args, model, device, test_loader)
# if epoch < args['epochs']:
# # report intermediate result
# nni.report_intermediate_result(test_acc)
# logger.debug('test accuracy %g', test_acc)
# logger.debug('Pipe send intermediate result done.')
# else:
# # report final result
# nni.report_final_result(test_acc)
# logger.debug('Final result is %g', test_acc)
# logger.debug('Send final result done.')
def main(args):
    """Train and evaluate with the hyper-parameters in ``args`` (a dict).

    Saves the best-loss checkpoint under MODEL_DIR and reports intermediate
    and final test losses to NNI.
    """
    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
    torch.manual_seed(args['seed'])
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    trainset = IMPAXDataset(os.path.join(args['data_dir'], 'train'))
    testset = IMPAXDataset(os.path.join(args['data_dir'], 'test'))
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=args['batch_size'], shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=True, **kwargs)

    model = Net(hidden_layer=args['hidden_layer'],
                hidden_size=args['hidden_size']).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args['lr'])

    best_loss = None
    for epoch in range(1, args['epochs'] + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_loss = test(args, model, device, test_loader)

        if best_loss is None or test_loss < best_loss:
            best_loss = test_loss
            # Checkpoint path: MODEL_DIR/[exp_name/]<model.name>/<experiment>/best_<trial>.pth
            pieces = [MODEL_DIR]
            if args['exp_name'] is not None:
                pieces.append(args['exp_name'])
            pieces.extend([model.name, nni.get_experiment_id(),
                           'best_{}.pth'.format(nni.get_trial_id())])
            model_file = os.path.join(*pieces)
            os.makedirs(os.path.dirname(model_file), exist_ok=True)
            torch.save(model.state_dict(), model_file)
            logger.info('model saved: %s' % model_file)

        if epoch < args['epochs']:
            # report intermediate result
            nni.report_intermediate_result(test_loss)
            logger.debug('test loss %g', test_loss)
            logger.debug('Pipe send intermediate result done.')
        else:
            # report final result
            nni.report_final_result(test_loss)
            logger.debug('Final result is %g', test_loss)
            logger.debug('Send final result done.')
    logger.info(' ')
def get_params():
    """Build the argparse namespace of training settings.

    Uses ``parse_known_args`` so unknown CLI flags (e.g. ones injected by the
    NNI runner) are silently ignored.
    """
    parser = argparse.ArgumentParser(description='PyTorch IMPAX Example')
    parser.add_argument("--data_dir", type=str,
                        default='/shares/Public/IMPAX/', help="data directory")
    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
                        help='hidden layer size (default: 512)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log_interval', type=int, default=1000, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--exp_name', default=None, type=str, help='exp name')
    known, _unknown = parser.parse_known_args()
    return known
if __name__ == '__main__':
    try:
        # Merge argparse defaults with the hyper-parameters proposed by the tuner.
        tuner_params = nni.get_next_parameter()
        logger.debug(tuner_params)
        params = vars(get_params())
        params.update(tuner_params)
        main(params)
    except Exception as exc:
        # Log the full traceback before letting the trial fail.
        logger.exception(exc)
        raise

27
IMPAX/nni/models.py Normal file
View file

@ -0,0 +1,27 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Fully-convolutional net with a configurable, weight-shared middle.

    Architecture: 1 -> hidden_size (conv_a), then ``hidden_layer`` repeated
    passes through the *same* conv_x layer (its weights are shared between
    passes), then hidden_size -> 2 (conv_z).  All kernels are 3x3 with
    padding=1, so spatial dimensions are preserved.
    """

    def __init__(self, hidden_layer, hidden_size):
        super(Net, self).__init__()
        self.name = 'model-%d-%d' % (hidden_layer, hidden_size)
        self.hidden_layer = hidden_layer
        self.conv_a = nn.Conv2d(1, hidden_size, 3, padding=1)
        self.conv_x = nn.Conv2d(hidden_size, hidden_size, 3, padding=1)
        self.conv_z = nn.Conv2d(hidden_size, 2, 3, padding=1)

    def forward(self, x):
        """(N, 1, H, W) -> (N, 2, H, W); linear output layer."""
        out = F.relu(self.conv_a(x))
        for _ in range(self.hidden_layer):
            out = F.relu(self.conv_x(out))
        return self.conv_z(out)

235
IMPAX/nni/nb1.ipynb Normal file

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,7 @@
{
"batch_size": {"_type":"choice", "_value": [4, 8, 16, 32, 64]},
"hidden_layer":{"_type":"choice","_value":[0, 1, 2, 3, 4, 5]},
"hidden_size":{"_type":"choice","_value":[4, 8, 16, 32, 64, 128]},
"lr":{"_type":"choice","_value":[0.00001, 0.0001, 0.001, 0.01, 0.1]}
}

147
IMPAX/py_inspect.py Normal file
View file

@ -0,0 +1,147 @@
import sys
from pywinauto import backend
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
def main():
    """Launch the Qt event loop with a single PyInspect window."""
    qt_app = QApplication(sys.argv)
    window = MyWindow()
    window.show()
    sys.exit(qt_app.exec_())
class MyWindow(QWidget):
    """Main PyInspect window: backend picker, control tree, property table."""

    def __init__(self, *args):
        QWidget.__init__(self, *args)
        self.setFixedSize(930, 631)
        self.setLocale(QLocale(QLocale.English, QLocale.UnitedStates))
        self.setWindowTitle(QCoreApplication.translate("MainWindow", "PyInspect"))
        self.central_widget = QWidget(self)

        # Backend selector, populated from pywinauto's backend registry.
        self.comboBox = QComboBox(self.central_widget)
        self.comboBox.setGeometry(QRect(10, 10, 451, 22))
        self.comboBox.setMouseTracking(False)
        self.comboBox.setMaxVisibleItems(5)
        self.comboBox.setObjectName("comboBox")
        for backend_name in backend.registry.backends.keys():
            self.comboBox.addItem(backend_name)

        # Left pane: tree of UI elements for the chosen backend.
        self.tree_view = QTreeView(self.central_widget)
        self.tree_view.setGeometry(QRect(10, 40, 451, 581))
        self.tree_view.setColumnWidth(0, 150)
        self.comboBox.setCurrentText('uia')
        self.__initialize_calc()

        # Right pane: properties of the selected element.
        self.table_view = QTableView(self.central_widget)
        self.table_view.setGeometry(QRect(470, 40, 451, 581))
        self.comboBox.activated[str].connect(self.__show_tree)

    def __initialize_calc(self, _backend='uia'):
        """(Re)build the element tree model for ``_backend`` and wire clicks."""
        self.element_info = backend.registry.backends[_backend].element_info_class()
        self.tree_model = MyTreeModel(self.element_info, _backend)
        self.tree_model.setHeaderData(0, Qt.Horizontal, 'Controls')
        self.tree_view.setModel(self.tree_model)
        self.tree_view.clicked.connect(self.__show_property)

    def __show_tree(self, text):
        """Combo-box handler: rebuild the tree for the newly picked backend."""
        self.__initialize_calc(text)

    def __show_property(self, index=None):
        """Tree-click handler: show the clicked node's properties in the table."""
        node_key = index.data()
        self.table_model = MyTableModel(self.tree_model.props_dict.get(node_key), self)
        self.table_view.wordWrap()
        self.table_view.setModel(self.table_model)
        self.table_view.setColumnWidth(1, 320)
class MyTreeModel(QStandardItemModel):
    """Tree of pywinauto element_info nodes plus a per-node property dict."""

    def __init__(self, element_info, backend):
        QStandardItemModel.__init__(self)
        self.props_dict = {}   # node label -> [[property, value], ...]
        self.backend = backend
        root_node = self.invisibleRootItem()
        self.branch = QStandardItem(self.__node_name(element_info))
        self.branch.setEditable(False)
        root_node.appendRow(self.branch)
        self.__generate_props_dict(element_info)
        self.__get_next(element_info, self.branch)

    def __get_next(self, element_info, parent):
        """Depth-first walk: append each child as a row and record its props."""
        for child in element_info.children():
            self.__generate_props_dict(child)
            child_item = QStandardItem(self.__node_name(child))
            child_item.setEditable(False)
            parent.appendRow(child_item)
            self.__get_next(child, child_item)

    def __node_name(self, element_info):
        """Unique display label; 'uia' nodes also show their control type."""
        if 'uia' == self.backend:
            return '%s "%s" (%s)' % (str(element_info.control_type), str(element_info.name), id(element_info))
        return '"%s" (%s)' % (str(element_info.name), id(element_info))

    def __generate_props_dict(self, element_info):
        """Record [property, value] pairs for one node, keyed by its label."""
        props = [
            ['control_id', str(element_info.control_id)],
            ['class_name', str(element_info.class_name)],
            ['enabled', str(element_info.enabled)],
            ['handle', str(element_info.handle)],
            ['name', str(element_info.name)],
            ['process_id', str(element_info.process_id)],
            ['rectangle', str(element_info.rectangle)],
            ['rich_text', str(element_info.rich_text)],
            ['visible', str(element_info.visible)]
        ]
        # No win32-specific properties are collected yet.
        props_win32 = []
        props_uia = [
            ['control_type', str(element_info.control_type)],
            ['element', str(element_info.element)],
            ['framework_id', str(element_info.framework_id)],
            ['runtime_id', str(element_info.runtime_id)]
        ] if (self.backend == 'uia') else []
        props.extend(props_uia)
        props.extend(props_win32)
        self.props_dict[self.__node_name(element_info)] = props
class MyTableModel(QAbstractTableModel):
    """Read-only two-column table over [property, value] rows."""

    def __init__(self, datain, parent=None, *args):
        QAbstractTableModel.__init__(self, parent, *args)
        self.arraydata = datain
        self.header_labels = ['Property', 'Value']

    def rowCount(self, parent):
        return len(self.arraydata)

    def columnCount(self, parent):
        return len(self.arraydata[0])

    def data(self, index, role):
        # Only the display role is served; everything else gets an empty variant.
        if not index.isValid() or role != Qt.DisplayRole:
            return QVariant()
        return QVariant(self.arraydata[index.row()][index.column()])

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole and orientation == Qt.Horizontal:
            return self.header_labels[section]
        return QAbstractTableModel.headerData(self, section, orientation, role)
if __name__ == "__main__":
main()

50
IMPAX/readjpg.py Normal file
View file

@ -0,0 +1,50 @@
import os
from PIL import Image, ImageFilter, ImageMath
from scipy import ndimage
import numpy as np
JPGDIR = '/media/nfs/SRS/IMPAX/'
def img_frombytes(data):
    """Pack a boolean/0-1 2-D array into a 1-bit ('1' mode) PIL image."""
    size_wh = data.shape[::-1]  # PIL wants (width, height)
    packed = np.packbits(data, axis=1)
    return Image.frombytes(mode='1', size=size_wh, data=packed)
for pid in sorted(os.listdir(JPGDIR)):
    PATDIR = os.path.join(JPGDIR, pid)
    for study in sorted(os.listdir(PATDIR)):
        if not study.endswith('_100'):
            continue
        ST100_DIR = os.path.join(PATDIR, study)
        for jpg in sorted(os.listdir(ST100_DIR)):
            # One-shot experiment: save a mask for the first jpg, then exit().
            # NOTE(review): exit() placement reconstructed from the flattened
            # source — it appears to bail after the very first image.
            jpg_path = os.path.join(ST100_DIR, jpg)
            grey = Image.open(jpg_path).convert('L')
            pixels = np.array(grey)
            # Candidate text pixels: grey value 0xCC +/- 1; C0 catches near-black.
            CC = np.logical_and(pixels >= 0xCB, pixels <= 0xCD)
            C0 = (pixels <= 0x01)
            MASK = np.logical_or(CC, C0)
            # Shift up-left by one: require the down-right neighbour to match too.
            MASK = np.roll(np.roll(MASK, -1, 0), -1, 1)
            MASKED = np.logical_and(CC, MASK).astype('uint8')
            # Keep a pixel only if at least 2 of its 3x3 neighbours are set.
            FILTERD = ndimage.rank_filter(MASKED, rank=-2, size=3)
            FILTERD = np.minimum(MASKED, FILTERD)
            img_frombytes(FILTERD).save('/shares/Public/0/0.png')
            exit()

96
IMPAX/split-jpg.py Normal file
View file

@ -0,0 +1,96 @@
import os
import statistics
import imageio
from PIL import Image, ImageFilter, ImageMath
import numpy as np
import SimpleITK as sitk
STUDY_PATH = "/media/nfs/SRS/storage/0/CT Without Contrast-Brain_55121720"
STUDY_PATH = '/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_54141890'
MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'
# Write image series using SimpleITK
def flush_file(shape, fileNames):
    """Write a series of two or more jpgs as one .nii.gz volume via SimpleITK.

    Slice spacing is scaled so the stack's depth matches the shorter in-plane
    dimension; series of fewer than two files are silently skipped.
    """
    if len(fileNames) > 1:
        xy = min(shape)
        outfile = '%s.nii.gz' % os.path.basename(fileNames[0]).split('.')[0]
        volume = sitk.ReadImage(fileNames)
        volume.SetSpacing([1.0, 1.0, 1.0 * xy / len(fileNames)])
        sitk.WriteImage(volume, outfile)
COR_ABS_THRESHOLD = 0.5
COR_REL_THRESHOLD = 0.8
def lower_bound(cors):
    """Outlier floor: mean - 3*stdev, capped by the smallest prior value.

    Returns 0 when there are fewer than two samples (stdev undefined).
    """
    THRESHOLD = 3
    if len(cors) < 2:
        return 0
    spread_floor = statistics.mean(cors) - statistics.stdev(cors) * THRESHOLD
    return min(spread_floor, min(cors[:-1]))
def lower_bound2(cors):
    """Tukey-fence floor: Q1 - 1.5*IQR, capped by the smallest prior value.

    Returns 0 when there are fewer than two samples.
    """
    THRESHOLD = 1.5
    # BUG FIX: guard was ``len(cors) < 1``, so a single-element list crashed
    # on ``min(cors[:-1])`` (min of empty sequence); match lower_bound's guard.
    if len(cors) < 2:
        return 0
    # BUG FIX: Q3 was computed as the 25th percentile, making IQR ~0 and the
    # fence collapse onto Q1; it must be the 75th percentile.
    # Also: numpy renamed ``interpolation=`` to ``method=`` (the old keyword
    # was removed in NumPy 2); 'lower'/'higher' selection is unchanged.
    Q1 = np.percentile(cors, 25, method='lower')
    Q3 = np.percentile(cors, 75, method='higher')
    IQR = Q3 - Q1
    return min(Q1 - THRESHOLD * IQR, min(cors[:-1]))
def main():
    """Group consecutive correlated jpgs into series and flush each as a volume.

    A new series starts whenever the correlation with the previous image drops
    below the absolute threshold or below the running outlier bound.
    """
    old_shape = None
    old_array = None
    old_cor = COR_ABS_THRESHOLD
    fileNames = []
    cors = []
    for jpg_file in sorted(os.listdir(STUDY_PATH)):
        jpg_path = os.path.join(STUDY_PATH, jpg_file)
        array = np.asarray(Image.open(jpg_path).convert('L'))
        shape = array.shape
        if not fileNames:
            cor = COR_ABS_THRESHOLD
        elif old_shape != shape:
            # Different dimensions can never belong to the same series.
            cor = 0
        else:
            cor = np.corrcoef(old_array.flat, array.flat)[0, 1]
        cors.append(cor)
        LB = lower_bound(cors)
        if cor < COR_ABS_THRESHOLD or cor < LB:
            # Correlation dropped: close the running series, start a new one.
            flush_file(old_shape, fileNames)
            fileNames = [jpg_path]
            cors = []
            mark = '**********'
        else:
            fileNames.append(jpg_path)
            mark = len(fileNames)
        print('%s %.4f %.4f %s' % (jpg_file, cor, LB, mark))
        old_array = array
        old_shape = shape
        old_cor = cor
    flush_file(old_shape, fileNames)
if __name__ == '__main__':
main()

114
IMPAX/split-jpg2.py Normal file
View file

@ -0,0 +1,114 @@
import os
import statistics
import imageio
from PIL import Image, ImageFilter, ImageMath
import numpy as np
import SimpleITK as sitk
STUDY_PATH = "/media/nfs/SRS/storage/0/CT Without Contrast-Brain_55121720"
STUDY_PATH = '/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_54141890'
MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'
# Write image series using SimpleITK
def flush_file(shape, fileNames):
    """Write a series of two or more jpgs as one .nii.gz volume via SimpleITK.

    Slice spacing is scaled so the stack's depth matches the shorter in-plane
    dimension; series of fewer than two files are silently skipped.
    """
    if len(fileNames) > 1:
        xy = min(shape)
        outfile = '%s.nii.gz' % os.path.basename(fileNames[0]).split('.')[0]
        volume = sitk.ReadImage(fileNames)
        volume.SetSpacing([1.0, 1.0, 1.0 * xy / len(fileNames)])
        sitk.WriteImage(volume, outfile)
COR_ABS_THRESHOLD = 0.5
COR_REL_THRESHOLD = 0.8
def lower_bound(cors, begin, end):
    """Gaussian-style fence: mean - 5*std over the open interval (begin, end).

    NOTE: this definition is shadowed by the second ``lower_bound`` defined
    right below it in this file; it is kept only for experimentation.
    """
    # THRESHOLD = 3
    THRESHOLD = 5
    window = cors[begin + 1:end]
    return np.mean(window) - np.std(window) * THRESHOLD
def lower_bound(cors, begin, end):
    """Tukey fence (Q1 - 2*IQR) over the open interval (begin, end).

    This definition intentionally shadows the mean/std variant above.
    Original author's note: "Not so good" with THRESHOLD 1.5, hence 2.
    """
    # THRESHOLD = 1.5
    THRESHOLD = 2
    # FIX: numpy renamed ``interpolation=`` to ``method=`` (the old keyword was
    # removed in NumPy 2); 'lower'/'higher' selection semantics are unchanged.
    Q1 = np.percentile(cors[begin + 1:end], 25, method='lower')
    Q3 = np.percentile(cors[begin + 1:end], 75, method='higher')
    IQR = Q3 - Q1
    return Q1 - THRESHOLD * IQR
NewSer = None
def check_low(cors, begin, end):
    """Recursively mark series boundaries in the global NewSer array.

    Finds the weakest correlation strictly inside (begin, end); if it is an
    outlier (below the absolute threshold or the interval's lower fence),
    marks that index as a new-series start and recurses into both halves.
    """
    if end - begin < 2:
        return
    window = cors[begin + 1:end]
    mini = np.min(window)
    if mini > COR_ABS_THRESHOLD and mini > lower_bound(cors, begin, end):
        # No outlier dip inside this interval.
        return
    argmin = np.argmin(window) + begin + 1
    print(begin, end, lower_bound(cors, begin, end), mini, argmin)
    NewSer[argmin] = 1
    check_low(cors, begin, argmin)
    check_low(cors, argmin, end)
def main():
    """Correlate consecutive jpgs, then mark series boundaries via check_low."""
    global NewSer
    old_shape = None
    old_array = None
    fileNames = []
    shapes = []
    cors = []
    for jpg_file in sorted(os.listdir(STUDY_PATH)):
        jpg_path = os.path.join(STUDY_PATH, jpg_file)
        array = np.asarray(Image.open(jpg_path).convert('L'))
        shape = array.shape
        if not fileNames:
            cor = 0
        elif old_shape != shape:
            # Dimension change always breaks a series.
            cor = 0
        else:
            cor = np.corrcoef(old_array.flat, array.flat)[0, 1]
        fileNames.append(jpg_path)
        shapes.append(shape)
        cors.append(cor)
        old_array = array
        old_shape = shape
    length = len(fileNames)
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i])
    NewSer = np.zeros(length)
    check_low(cors, 0, length)
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i], '***' if NewSer[i] else '')
if __name__ == '__main__':
main()

124
IMPAX/split-jpg3.py Normal file
View file

@ -0,0 +1,124 @@
import os
import statistics
import imageio
from PIL import Image, ImageFilter, ImageMath
import numpy as np
import SimpleITK as sitk
STUDY_PATH = "/media/nfs/SRS/storage/0/CT Without Contrast-Brain_55121720"
STUDY_PATH = '/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_54141890'
STUDY_PATH ='/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_4879927'
MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'
# Write image series using SimpleITK
def flush_file(shape, fileNames):
    """Write a series of two or more jpgs as one .nii.gz volume via SimpleITK.

    Slice spacing is scaled so the stack's depth matches the shorter in-plane
    dimension; series of fewer than two files are silently skipped.
    """
    if len(fileNames) > 1:
        xy = min(shape)
        outfile = '%s.nii.gz' % os.path.basename(fileNames[0]).split('.')[0]
        volume = sitk.ReadImage(fileNames)
        volume.SetSpacing([1.0, 1.0, 1.0 * xy / len(fileNames)])
        sitk.WriteImage(volume, outfile)
# COR_ABS_THRESHOLD = 0.5
# COR_REL_THRESHOLD = 0.8
COR_LO_THRESHOLD = 0.5
COR_HI_THRESHOLD = 0.7
def lower_bound_normal(cors, begin, end):
    """Gaussian-style fence: mean - 3*std over the open interval (begin, end)."""
    THRESHOLD = 3
    # THRESHOLD = 5
    window = cors[begin + 1:end]
    return np.mean(window) - np.std(window) * THRESHOLD
def lower_bound(cors, begin, end):
    """Tukey fence (Q1 - 1.5*IQR) over the open interval (begin, end)."""
    THRESHOLD = 1.5
    # THRESHOLD = 2
    # FIX: numpy renamed ``interpolation=`` to ``method=`` (the old keyword was
    # removed in NumPy 2); 'lower'/'higher' selection semantics are unchanged.
    Q1 = np.percentile(cors[begin + 1:end], 25, method='lower')
    Q3 = np.percentile(cors[begin + 1:end], 75, method='higher')
    IQR = Q3 - Q1
    return Q1 - THRESHOLD * IQR
NewSer = None
def check_low(cors, begin, end):
    """Recursively mark outlier correlation dips in the global NewSer array.

    A dip counts only if it is below COR_HI_THRESHOLD and additionally below
    either COR_LO_THRESHOLD or the interval's Tukey fence.
    """
    if end - begin < 2:
        return
    window = cors[begin + 1:end]
    mini = np.min(window)
    if mini > COR_HI_THRESHOLD:
        # Everything in this interval is strongly correlated.
        return
    if mini > COR_LO_THRESHOLD and mini > lower_bound(cors, begin, end):
        return
    argmin = np.argmin(window) + begin + 1
    print(begin, end, lower_bound(cors, begin, end), mini, argmin)
    NewSer[argmin] = 1
    check_low(cors, begin, argmin)
    check_low(cors, argmin, end)
def main():
    """Scan STUDY_PATH JPGs, split them into series at correlation dips,
    and write each run of consecutive slices as a NIfTI volume.
    """
    global NewSer
    old_shape = None
    old_array = None
    fileNames = []
    shapes = []
    cors = []
    for jpg_file in sorted(os.listdir(STUDY_PATH)):
        jpg_path = os.path.join(STUDY_PATH, jpg_file)
        array = np.asarray(Image.open(jpg_path).convert('L'))
        shape = array.shape
        if not fileNames:
            cor = 0
        elif old_shape != shape:
            # A resolution change cannot belong to the same series.
            cor = 0
        else:
            # Pearson correlation between consecutive slices.
            cor = np.corrcoef(old_array.flat, array.flat)[0, 1]
        fileNames.append(jpg_path)
        shapes.append(shape)
        cors.append(cor)
        old_array = array
        old_shape = shape
    length = len(fileNames)
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i])
    NewSer = np.zeros(length)
    check_low(cors, 0, length)
    start = 0
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i], '***' if NewSer[i] else '')
        if NewSer[i]:
            if i - start > 1:
                flush_file(shapes[start], fileNames[start:i])
            start = i
    # BUG FIX: the final flush previously used fileNames[start:i], which
    # dropped the last slice (i stops at length-1); flush the full tail.
    flush_file(shapes[start], fileNames[start:])
if __name__ == '__main__':
main()

142
IMPAX/split-jpg4.py Normal file
View file

@ -0,0 +1,142 @@
import os
import statistics
import imageio
from PIL import Image, ImageFilter, ImageMath
from scipy import ndimage
import numpy as np
import SimpleITK as sitk
STUDY_PATH = "/media/nfs/SRS/storage/0/CT Without Contrast-Brain_55121720"
STUDY_PATH = '/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_54141890'
STUDY_PATH ='/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_4879927'
# STUDY_PATH ='/media/nfs/SRS/storage/0/MRI With_Without Contrast--Brain_55850220'
MODEL_PATH = '/home/xfr/nni/model-5-64/TwNuKtj7/best_zdoyO.pth'
# Write image series using SimpleITK
def flush_file(shape, fileNames):
    """Write fileNames as one .nii.gz volume named after the first file.

    Runs of 9 or fewer slices are skipped (too short to be a real series).
    """
    if len(fileNames) > 9:
        xy = min(shape)
        outfile = '%s.nii.gz' % os.path.basename(fileNames[0]).split('.')[0]
        img = sitk.ReadImage(fileNames)
        # Unit in-plane spacing; slice spacing scaled so the stack spans the
        # smaller in-plane dimension (assumes a roughly cubic FOV — TODO confirm).
        img.SetSpacing([1.0,1.0, 1.0*xy/len(fileNames)])
        sitk.WriteImage(img, outfile)
# COR_ABS_THRESHOLD = 0.5
# COR_REL_THRESHOLD = 0.8
COR_LO_THRESHOLD = 0.5
COR_HI_THRESHOLD = 0.65
def lower_bound_normal(cors, begin, end):
    """Normal-model outlier floor: mean - 3*std over cors[begin+1:end]."""
    K = 3          # sigmas below the mean
    # K = 5
    interior = cors[begin + 1:end]
    return np.mean(interior) - K * np.std(interior)
def lower_bound(cors, begin, end):
    """Tukey-fence outlier floor (Q1 - 1.5*IQR) over cors[begin+1:end]."""
    THRESHOLD = 1.5
    # THRESHOLD = 2
    window = cors[begin + 1:end]
    # BUG FIX: the 'interpolation=' keyword was renamed to 'method=' in
    # NumPy 1.22 and removed in NumPy 2.0; the old spelling raises there.
    Q1 = np.percentile(window, 25, method='lower')
    Q3 = np.percentile(window, 75, method='higher')
    IQR = Q3 - Q1
    return Q1 - THRESHOLD * IQR
# Per-slice flags (numpy array allocated in main): 1 marks the first slice
# of a new series. Populated in place by check_low().
NewSer = None
def check_low(cors, begin, end):
    """Recursively mark series boundaries inside cors[begin:end].

    Finds the weakest inter-slice correlation strictly inside the window
    and, if it is below COR_HI_THRESHOLD and also an outlier (under
    COR_LO_THRESHOLD or under the Tukey fence from lower_bound), flags
    that index in the global NewSer and recurses on both sub-windows.
    """
    # Fewer than two slices cannot contain an interior boundary.
    if end - begin < 2:
        return
    mini = np.min(cors[begin+1:end])
    # Everything correlates strongly: one homogeneous series.
    if mini > COR_HI_THRESHOLD:
        return
    # Moderate dip that is not an outlier for this window: keep as one series.
    if mini > COR_LO_THRESHOLD and mini > lower_bound(cors, begin, end):
        # exit()
        return
    argmin = np.argmin(cors[begin+1:end]) + begin+1
    print(begin, end, lower_bound(cors, begin, end), mini, argmin)
    NewSer[argmin] = 1
    check_low(cors, begin, argmin)
    check_low(cors, argmin, end)
    # exit()
# exit()
def main():
    """Split STUDY_PATH JPGs into series and write each run as NIfTI.

    Splitting uses a plain threshold on the inter-slice correlation; the
    recursive check_low() pass kept after exit() is currently disabled.
    """
    global NewSer
    old_shape = None
    old_array = None
    fileNames = []
    shapes = []
    cors = []
    for jpg_file in sorted(os.listdir(STUDY_PATH)):
        jpg_path = os.path.join(STUDY_PATH, jpg_file)
        array = np.asarray(Image.open(jpg_path).convert('L'))
        # Minimum (rank 0) filter over a 3x3 window before correlating.
        array = ndimage.rank_filter(array, 0, size=3)
        shape = array.shape
        if not fileNames:
            cor = 0
        elif old_shape != shape:
            # A resolution change cannot belong to the same series.
            cor = 0
        else:
            cor = np.corrcoef(old_array.flat, array.flat)[0, 1]
        fileNames.append(jpg_path)
        shapes.append(shape)
        cors.append(cor)
        old_array = array
        old_shape = shape
    length = len(fileNames)
    start = 0
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i], '***' if cors[i] < COR_HI_THRESHOLD else '')
        if cors[i] < COR_HI_THRESHOLD:
            if i - start > 1:
                flush_file(shapes[start], fileNames[start:i])
            start = i
    # BUG FIX: fileNames[start:i] dropped the final slice; flush the tail.
    flush_file(shapes[start], fileNames[start:])
    exit()
    # --- dead code below: never reached because of exit() above ---
    NewSer = np.zeros(length)
    check_low(cors, 0, length)
    start = 0
    for i in range(length):
        print(fileNames[i], i, shapes[i], cors[i], '***' if NewSer[i] else '')
        if NewSer[i]:
            if i - start > 1:
                flush_file(shapes[start], fileNames[start:i])
            start = i
    # BUG FIX (same off-by-one as above): flush the full tail.
    flush_file(shapes[start], fileNames[start:])
if __name__ == '__main__':
main()

BIN
IMPAX/test.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 686 B

97
IMPAX/test.py Normal file
View file

@ -0,0 +1,97 @@
import re
from models import *
# Captures the contents of a double-quoted value.
pattern_strip = r'"(.*)"'
prog_strip = re.compile(pattern_strip)
def strip(s):
    """Trim whitespace and, if the result is double-quoted, drop the quotes."""
    trimmed = s.strip()
    quoted = prog_strip.match(trimmed)
    return quoted[1] if quoted else trimmed
# Displays DICOM header information for an image.
# document1 lines look like:
#   , specialty ] = "CHEST" (
#   [ object, original_order ] = 1 ( 4, 4 )
# Duplicated keys previously observed: private_creator,
# referenced_sop_class_uid, 'Unknown element', referenced_sop_instance_uid,
# group_length.
pattern_header = r'\[ (.+), (.+) \] = (.+) \('
prog_header = re.compile(pattern_header)
def dicom_header(d0, d1):
    """Parse two textual DICOM header dumps and print their contents.

    d0: pipe-delimited dump (column 1 = key, column 4 = value).
    d1: bracketed dump matched by prog_header, grouped by the first key.
    NOTE(review): exploratory code — it calls exit() before returning, so
    the function never returns normally and the trailing loop is dead.
    """
    dup_key = set()
    dict0 = {}
    for line in d0.split('\r\n'):
        cols = line.split('|')
        if len(cols) >= 5:
            key = cols[1].strip()
            if key in dict0:
                dup_key.add(key)
            dict0[key]=strip(cols[4])
        else:
            # Lines that don't fit the column layout are echoed for inspection.
            print(line)
    dict1 = {}
    for line in d1.split('\r\n'):
        m = prog_header.search(line)
        if m:
            key1 = m[1].strip()
            key2 = m[2].strip()
            value = strip(m[3])
            if key1 not in dict1:
                dict1[key1] = {key2: value}
            else:
                if key2 in dict1[key1]:
                    dup_key.add(key2)
                dict1[key1][key2] = value
        else:
            print(line)
    # Dump both parsed dictionaries in sorted order.
    for k in sorted(dict0.keys()):
        print(k, dict0[k])
    for k1 in sorted(dict1.keys()):
        for k2 in sorted(dict1[k1].keys()):
            print(k1, k2, dict1[k1][k2])
    # exit()
    # cols = line.split('|')
    # if len(cols) == 5:
    #     dict0[cols[1].strip()]=strip(cols[4])
    print(dup_key)
    # print(dict1)
    exit()
    # Dead code below: unreachable, and `d` is not defined in this scope.
    for k in sorted(d.keys()):
        print(k, d[k])
    exit()
# Iterate every Series row and parse both stored header dumps.
session = Session()
for series in session.query(Series):
    d0 = series.document0
    d1 = series.document1
    # dicom_header() prints its results and calls exit() internally, so in
    # practice only the first row is processed.
    print(dicom_header(d0, d1))
exit()

17
IMPAX/test2.py Normal file
View file

@ -0,0 +1,17 @@
from appium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
# Attach to the Windows desktop root session via an Appium/WinAppDriver server.
desired_caps = {}
desired_caps["app"] = "Root"
desktop = webdriver.Remote(
    command_executor='http://192.168.11.101:4723',
    desired_capabilities= desired_caps)
# NOTE(review): find_element_by_accessibility_id / find_elements_by_xpath are
# the Selenium-3 style APIs, removed in Selenium 4 (use find_element(By...,)).
OpenItemPanel = desktop.find_element_by_accessibility_id("OpenItemPanel")
# Hover over the panel so its children are realized before enumeration.
ActionChains(desktop).move_to_element(OpenItemPanel).perform()
# Dump the panel's grandchildren for exploration.
for e in OpenItemPanel.find_elements_by_xpath('*/*'):
    print(e, e.tag_name, e.text)

146
IMPAX/train.py Normal file
View file

@ -0,0 +1,146 @@
"""Train the N90 network on the IMPAX dataset and visualize predictions."""
import random
import statistics
import matplotlib.pyplot as plt
import torch.optim as optim
from dataset import *
from models import *

BATCH_SIZE = 32
TEST_STEP = 5

# NOTE(review): train and test sets point at the same directory, so the
# "test" loss below is measured on training data — confirm intent.
trainset = IMPAXDataset('/shares/Public/IMPAX/train')
testset = IMPAXDataset('/shares/Public/IMPAX/train')
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                          shuffle=True, num_workers=6)
testloader = torch.utils.data.DataLoader(testset, batch_size=TEST_STEP,
                                         shuffle=True, num_workers=6)

# Setting device on GPU if available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
# Additional info when using cuda.
if device.type == 'cuda':
    print(torch.cuda.get_device_name(0))
    print('Memory Usage:')
    print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')
    # BUG FIX: torch.cuda.memory_cached() is deprecated; memory_reserved()
    # is the supported equivalent.
    print('Cached:   ', round(torch.cuda.memory_reserved(0)/1024**3, 1), 'GB')

net = N90().to(device)
# criterion = nn.MSELoss(reduction='sum')
criterion = nn.MSELoss()
# optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
optimizer = optim.Adam(net.parameters())

train_loss = []
for epoch in range(99):  # loop over the dataset multiple times
    running_loss = 0.0
    batches = 0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        batches = i + 1
    # BUG FIX: previously divided by i (the last batch index), which is an
    # off-by-one mean and a ZeroDivisionError when the loader yields a
    # single batch; also fixed the "trian" typo in the log line.
    epoch_loss = running_loss / batches
    print('[%d, %5d] train loss: %.3f' % (epoch + 1, batches, epoch_loss))
    train_loss.append(epoch_loss)
    # Early stop once the recent 10-epoch loss average stops improving.
    if epoch > 20:
        if statistics.mean(train_loss[-10:-1]) > statistics.mean(train_loss[-20: -10]):
            break
print('Finished Training')

# Mean MSE over the evaluation loader (no gradients needed).
test_loss = []
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images.to(device))
        test_loss.append(criterion(labels, outputs.cpu()))
mean = torch.mean(torch.stack(test_loss))
print('test loss: %.3f' % (mean.item()))

######
# Visualize a handful of predictions next to their inputs and targets:
# columns are input, target ch0, prediction ch0, target ch1, prediction ch1.
plt.rcParams['figure.figsize'] = [20, 30]
dataiter = iter(testloader)
# BUG FIX: DataLoader iterators have no .next() method on Python 3; use next().
images, labels = next(dataiter)
output = net(images.to(device))
for j in range(TEST_STEP):
    out = output[j]
    plt.subplot(TEST_STEP, 5, j*5+1)
    plt.imshow(images[j][0, :, :], cmap='gray')
    plt.subplot(TEST_STEP, 5, j*5+2)
    plt.imshow(labels[j][0, :, :], cmap='gray')
    plt.subplot(TEST_STEP, 5, j*5+3)
    plt.imshow(out[0, :, :].cpu().detach().numpy(), cmap='gray')
    plt.subplot(TEST_STEP, 5, j*5+4)
    plt.imshow(labels[j][1, :, :], cmap='gray')
    plt.subplot(TEST_STEP, 5, j*5+5)
    plt.imshow(out[1, :, :].cpu().detach().numpy(), cmap='gray')
plt.show()

29
README Normal file
View file

@ -0,0 +1,29 @@
Command line instructions
Git global setup
git config --global user.name "Furen Xiao"
git config --global user.email "xfuren@gmail.com"
Create a new repository
git clone git@git126.ntuh.net:xfuren/test.git
cd test
touch README.md
git add README.md
git commit -m "add README"
git push -u origin master
Existing folder
cd existing_folder
git init
git remote add origin git@git126.ntuh.net:xfuren/test.git
git add .
git commit -m "Initial commit"
git push -u origin master
Existing Git repository
cd existing_repo
git remote rename origin old-origin
git remote add origin git@git126.ntuh.net:xfuren/test.git
git push -u origin --all
git push -u origin --tags

564
demo-patho.html Normal file

File diff suppressed because one or more lines are too long

404
demo-patho2.html Normal file

File diff suppressed because one or more lines are too long

435
forteo/Untitled.ipynb Normal file

File diff suppressed because one or more lines are too long

758
forteo/analysis.ipynb Normal file
View file

@ -0,0 +1,758 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from collections import Counter\n",
"\n",
"import math\n",
"import re\n",
"\n",
"from pandas import read_excel\n",
"from pymongo import MongoClient\n",
"from pyquery import PyQuery as pq\n",
"from scipy import stats\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"SHEETS = (\n",
" (\"台灣大學醫學院附設醫院_201601-201809.xls\", \"Sheet1\"), \n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"frames = []"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"for file_name, sheet_name in SHEETS:\n",
" data = read_excel(file_name, sheet_name)\n",
" frames.append(data)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"df=pd.concat(frames, ignore_index=True, sort=False)\n",
"df.to_excel('concat2.xls')"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>醫院</th>\n",
" <th>醫師</th>\n",
" <th>系統編號</th>\n",
" <th>病患姓名</th>\n",
" <th>簽署日</th>\n",
" <th>key-in日</th>\n",
" <th>患者出生年月日</th>\n",
" <th>患者狀況</th>\n",
" <th>流失/停藥日期</th>\n",
" <th>用藥時間</th>\n",
" <th>病患是否參加P1NP</th>\n",
" </tr>\n",
" <tr>\n",
" <th>是否自費</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>健保給付</th>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>80</td>\n",
" <td>43</td>\n",
" <td>80</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>自費</th>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>271</td>\n",
" <td>187</td>\n",
" <td>271</td>\n",
" <td>1</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 醫院 醫師 系統編號 病患姓名 簽署日 key-in日 患者出生年月日 患者狀況 流失/停藥日期 用藥時間 \\\n",
"是否自費 \n",
"健保給付 80 80 80 80 80 80 80 80 43 80 \n",
"自費 271 271 271 271 271 271 271 271 187 271 \n",
"\n",
" 病患是否參加P1NP \n",
"是否自費 \n",
"健保給付 0 \n",
"自費 1 "
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# print(df['是否自費'])\n",
"df.groupby('是否自費').count()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead tr th {\n",
" text-align: left;\n",
" }\n",
"\n",
" .dataframe thead tr:last-of-type th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr>\n",
" <th></th>\n",
" <th colspan=\"8\" halign=\"left\">用藥時間</th>\n",
" <th colspan=\"8\" halign=\"left\">系統編號</th>\n",
" </tr>\n",
" <tr>\n",
" <th></th>\n",
" <th>count</th>\n",
" <th>mean</th>\n",
" <th>std</th>\n",
" <th>min</th>\n",
" <th>25%</th>\n",
" <th>50%</th>\n",
" <th>75%</th>\n",
" <th>max</th>\n",
" <th>count</th>\n",
" <th>mean</th>\n",
" <th>std</th>\n",
" <th>min</th>\n",
" <th>25%</th>\n",
" <th>50%</th>\n",
" <th>75%</th>\n",
" <th>max</th>\n",
" </tr>\n",
" <tr>\n",
" <th>是否自費</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>健保給付</th>\n",
" <td>80.0</td>\n",
" <td>12.334821</td>\n",
" <td>7.326534</td>\n",
" <td>0.142857</td>\n",
" <td>6.455357</td>\n",
" <td>12.392857</td>\n",
" <td>18.651786</td>\n",
" <td>24.0</td>\n",
" <td>80.0</td>\n",
" <td>40738.875000</td>\n",
" <td>3719.022181</td>\n",
" <td>35494.0</td>\n",
" <td>37584.0</td>\n",
" <td>40289.5</td>\n",
" <td>44118.75</td>\n",
" <td>47796.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>自費</th>\n",
" <td>271.0</td>\n",
" <td>7.450053</td>\n",
" <td>6.200420</td>\n",
" <td>0.214286</td>\n",
" <td>2.732143</td>\n",
" <td>5.642857</td>\n",
" <td>10.160714</td>\n",
" <td>24.0</td>\n",
" <td>271.0</td>\n",
" <td>41558.937269</td>\n",
" <td>3793.111387</td>\n",
" <td>35258.0</td>\n",
" <td>38007.0</td>\n",
" <td>41723.0</td>\n",
" <td>44780.00</td>\n",
" <td>47792.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 用藥時間 \\\n",
" count mean std min 25% 50% 75% \n",
"是否自費 \n",
"健保給付 80.0 12.334821 7.326534 0.142857 6.455357 12.392857 18.651786 \n",
"自費 271.0 7.450053 6.200420 0.214286 2.732143 5.642857 10.160714 \n",
"\n",
" 系統編號 \\\n",
" max count mean std min 25% 50% \n",
"是否自費 \n",
"健保給付 24.0 80.0 40738.875000 3719.022181 35494.0 37584.0 40289.5 \n",
"自費 24.0 271.0 41558.937269 3793.111387 35258.0 38007.0 41723.0 \n",
"\n",
" \n",
" 75% max \n",
"是否自費 \n",
"健保給付 44118.75 47796.0 \n",
"自費 44780.00 47792.0 "
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.groupby('是否自費').describe()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>醫院</th>\n",
" <th>醫師</th>\n",
" <th>系統編號</th>\n",
" <th>病患姓名</th>\n",
" <th>簽署日</th>\n",
" <th>key-in日</th>\n",
" <th>患者出生年月日</th>\n",
" <th>流失/停藥日期</th>\n",
" <th>是否自費</th>\n",
" <th>用藥時間</th>\n",
" <th>病患是否參加P1NP</th>\n",
" </tr>\n",
" <tr>\n",
" <th>患者狀況</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>10-成功問卷</th>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>18</td>\n",
" <td>111</td>\n",
" <td>111</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>11-成功問卷-次回拒訪</th>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>12-成功問卷-已停藥</th>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>210</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>20-電話錯誤/無此人</th>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>24-拒訪</th>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>0</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>26-往生</th>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>0</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2B-五次聯絡不到</th>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2D-暫時停藥-觀察中</th>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2E-暫時停藥-住院中</th>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2G-連續三個月連絡不到</th>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>0</td>\n",
" <td>6</td>\n",
" <td>6</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>31-無人接聽</th>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>33-語音信箱/答錄機</th>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>1</td>\n",
" <td>2</td>\n",
" <td>2</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 醫院 醫師 系統編號 病患姓名 簽署日 key-in日 患者出生年月日 流失/停藥日期 是否自費 \\\n",
"患者狀況 \n",
"10-成功問卷 111 111 111 111 111 111 111 18 111 \n",
"11-成功問卷-次回拒訪 1 1 1 1 1 1 1 0 1 \n",
"12-成功問卷-已停藥 210 210 210 210 210 210 210 210 210 \n",
"20-電話錯誤/無此人 1 1 1 1 1 1 1 0 1 \n",
"24-拒訪 8 8 8 8 8 8 8 0 8 \n",
"26-往生 6 6 6 6 6 6 6 0 6 \n",
"2B-五次聯絡不到 1 1 1 1 1 1 1 0 1 \n",
"2D-暫時停藥-觀察中 3 3 3 3 3 3 3 1 3 \n",
"2E-暫時停藥-住院中 1 1 1 1 1 1 1 0 1 \n",
"2G-連續三個月連絡不到 6 6 6 6 6 6 6 0 6 \n",
"31-無人接聽 1 1 1 1 1 1 1 0 1 \n",
"33-語音信箱/答錄機 2 2 2 2 2 2 2 1 2 \n",
"\n",
" 用藥時間 病患是否參加P1NP \n",
"患者狀況 \n",
"10-成功問卷 111 0 \n",
"11-成功問卷-次回拒訪 1 0 \n",
"12-成功問卷-已停藥 210 1 \n",
"20-電話錯誤/無此人 1 0 \n",
"24-拒訪 8 0 \n",
"26-往生 6 0 \n",
"2B-五次聯絡不到 1 0 \n",
"2D-暫時停藥-觀察中 3 0 \n",
"2E-暫時停藥-住院中 1 0 \n",
"2G-連續三個月連絡不到 6 0 \n",
"31-無人接聽 1 0 \n",
"33-語音信箱/答錄機 2 0 "
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.groupby('患者狀況').count()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead tr th {\n",
" text-align: left;\n",
" }\n",
"\n",
" .dataframe thead tr:last-of-type th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr>\n",
" <th></th>\n",
" <th colspan=\"8\" halign=\"left\">用藥時間</th>\n",
" <th colspan=\"8\" halign=\"left\">系統編號</th>\n",
" </tr>\n",
" <tr>\n",
" <th></th>\n",
" <th>count</th>\n",
" <th>mean</th>\n",
" <th>std</th>\n",
" <th>min</th>\n",
" <th>25%</th>\n",
" <th>50%</th>\n",
" <th>75%</th>\n",
" <th>max</th>\n",
" <th>count</th>\n",
" <th>mean</th>\n",
" <th>std</th>\n",
" <th>min</th>\n",
" <th>25%</th>\n",
" <th>50%</th>\n",
" <th>75%</th>\n",
" <th>max</th>\n",
" </tr>\n",
" <tr>\n",
" <th>是否自費</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>健保給付</th>\n",
" <td>36.0</td>\n",
" <td>13.112103</td>\n",
" <td>6.840638</td>\n",
" <td>0.750000</td>\n",
" <td>7.678571</td>\n",
" <td>16.196429</td>\n",
" <td>18.651786</td>\n",
" <td>20.857143</td>\n",
" <td>36.0</td>\n",
" <td>38853.555556</td>\n",
" <td>3038.409645</td>\n",
" <td>35494.0</td>\n",
" <td>37015.75</td>\n",
" <td>37777.0</td>\n",
" <td>40363.25</td>\n",
" <td>47017.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>自費</th>\n",
" <td>174.0</td>\n",
" <td>7.182677</td>\n",
" <td>5.494101</td>\n",
" <td>0.678571</td>\n",
" <td>2.758929</td>\n",
" <td>5.678571</td>\n",
" <td>9.705357</td>\n",
" <td>23.714286</td>\n",
" <td>174.0</td>\n",
" <td>40316.954023</td>\n",
" <td>3181.145216</td>\n",
" <td>35335.0</td>\n",
" <td>37448.50</td>\n",
" <td>40391.5</td>\n",
" <td>42677.75</td>\n",
" <td>46896.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 用藥時間 \\\n",
" count mean std min 25% 50% 75% \n",
"是否自費 \n",
"健保給付 36.0 13.112103 6.840638 0.750000 7.678571 16.196429 18.651786 \n",
"自費 174.0 7.182677 5.494101 0.678571 2.758929 5.678571 9.705357 \n",
"\n",
" 系統編號 \\\n",
" max count mean std min 25% 50% \n",
"是否自費 \n",
"健保給付 20.857143 36.0 38853.555556 3038.409645 35494.0 37015.75 37777.0 \n",
"自費 23.714286 174.0 40316.954023 3181.145216 35335.0 37448.50 40391.5 \n",
"\n",
" \n",
" 75% max \n",
"是否自費 \n",
"健保給付 40363.25 47017.0 \n",
"自費 42677.75 46896.0 "
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df2 = df.query('患者狀況 == \"12-成功問卷-已停藥\"')\n",
"df2.groupby('是否自費').describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

BIN
forteo/cAR.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

BIN
forteo/cAR3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

BIN
forteo/drug.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 200 KiB

1
forteo/ntuhgov Symbolic link
View file

@ -0,0 +1 @@
../ntuh/submodule/ntuhgov/

50
forteo/scrape.py Executable file
View file

@ -0,0 +1,50 @@
"""Scrape drug/report/op records for each chart number in the Excel sheets
and upsert them into the MongoDB 'forteo.posts' collection."""
import re
# from openpyxl import load_workbook
from pandas import read_excel
from pymongo import MongoClient

# (workbook file, sheet name) pairs to ingest.
SHEETS = (
    ("神外-總院_1070920.xlsx", "總院"),
    # ("(骨穩)總院_分科1070928.xlsx", "工作表1")
    ("(骨穩)總院_分科1071002.xlsx", "工作表1")
)

client = MongoClient("mongodb.xiao.tw", 27017)
db = client.forteo
posts = db.posts

from ntuhgov.portal_selenium import *
from ntuhgov.myutil import *

for file_name, sheet_name in SHEETS:
    data = read_excel(file_name, sheet_name)
    for chartno in data.病歷號:
        # BUG FIX: "\d" in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python); use a raw string.
        # Skip rows whose chart number contains no digits.
        if not re.search(r"\d+", str(chartno)):
            continue
        print(chartno)
        post = posts.find_one({"_id": chartno})
        if post is None:
            post = {"_id": chartno}
        # Scrape each record type only once per chart number.
        if 'drug' not in post:
            post['drug'] = ShowMedicalRecordDrug(chartno)
        if 'report' not in post:
            post['report'] = ElectronicMedicalReportViewer(chartno)
        if 'op' not in post:
            post['op'] = OPNoteList(chartno)
        # BUG FIX: Collection.update() was removed in PyMongo 4;
        # update_one() is the equivalent single-document upsert.
        posts.update_one({"_id": chartno}, {"$set": post}, upsert=True)

View file

@ -0,0 +1,16 @@
"""Convert TTC font to TTF using fontforge with python extension.
**Warning** The scripts saves splitted fonts in the current working directory.
Usage:
split_ttc_font_to_ttf.py Droid.ttc
"""
import sys
import fontforge
fonts = fontforge.fontsInFile(sys.argv[1])
for fontName in fonts:
font = fontforge.open('%s(%s)'%(sys.argv[1], fontName))
font.generate('%s.ttf'%fontName)
font.close()

18
ntuh/.project Executable file
View file

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>ntuh</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
<nature>org.python.pydev.django.djangoNature</nature>
</natures>
</projectDescription>

7
ntuh/.pydevproject Executable file
View file

@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">/home/xfr/myenv/bin/python</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
</pydev_project>

View file

@ -0,0 +1,17 @@
eclipse.preferences.version=1
encoding//ck/views.py=utf-8
encoding//fileupload/views.py=utf-8
encoding//nss/quarter.py=utf-8
encoding//ntuh/settings.py=utf-8
encoding//ntuhgov/portal.py=utf-8
encoding//ntuhgov/portal_spynner.py=utf-8
encoding//ntuhgov/xportal.py=utf-8
encoding//registry/models.py=utf-8
encoding//registry/utils.py=utf-8
encoding//registry/views.py=utf-8
encoding//research/x_classifier.py=utf-8
encoding/assistant.py=utf-8
encoding/context_processors.py=utf-8
encoding/get_inpatient.py=utf-8
encoding/getop.py=utf-8
encoding/getpatho.py=utf-8

792
ntuh/172_16_3_33_389.csv Executable file

File diff suppressed because one or more lines are too long

743
ntuh/172_16_3_33_389_201801.csv Executable file

File diff suppressed because one or more lines are too long

0
ntuh/FILE Normal file
View file

1
ntuh/README.txt Executable file
View file

@ -0,0 +1 @@
1234

0
ntuh/__init__.py Executable file
View file

14
ntuh/apache/django.wsgi Executable file
View file

@ -0,0 +1,14 @@
import os, sys
# Make the project root (two directories up from this file) importable.
path = os.path.abspath(os.path.dirname(__file__)+'/../../')
#path = '/home/ntuh/domains/adm.ntuh.net/ntuh'
sys.path.append(path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'ntuh.settings'
import django.core.handlers.wsgi
# WSGI entry point used by mod_wsgi (pre-Django-1.4 handler style).
application = django.core.handlers.wsgi.WSGIHandler()
# Also expose the inner ntuh/ package directory on the import path.
path2 = '%s/ntuh/' % path
if path2 not in sys.path:
    sys.path.append(path2)

72
ntuh/assistant.py Executable file
View file

@ -0,0 +1,72 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 每個月的助手列表
FILENAME = '/Public/2012/09/2012年9月各月份外科手術主治及住院醫師資料.xls'
FILENAME = '/Public/2013/03/10203_[MonlyReport_M387]_T0_前月份外科手術主治及住院醫師資料.xls'
FILENAME = u'/shares/Public/2020/外科手術主治及住院醫師資料/10904_[MonlyReport_M387]_T0_前月份外科手術主治及住院醫師資料.xls'
from datetime import *
from xlrd import *
#from tempfile import TemporaryFile
from xlwt import Workbook
import os
import re
def Tokenization(s):
    """Split an assistant-name field into individual non-ASCII name tokens.

    Separator characters are blanked out first; then any run matching the
    ASCII filler class (letters, digits, spaces, punctuation) acts as a
    delimiter. Empty fragments are dropped; the result is printed and
    returned.
    """
    for sep in (u'', u'', u'.'):
        s = s.replace(sep, u' ')
    tokens = [t for t in re.split('[a-zA-Z0-9_ ,;/\(\)!]+', s) if t]
    print (tokens)
    return tokens
def Sum(s):
    """Tally assistant appearances on worksheet *s* and write counts > 1
    to '<sheet name>.xls' next to FILENAME.

    Relies on the module-level workbook *wb* for the Excel date mode.
    """
    adict = {}
    print (s.name)
    for row in range(s.nrows):
        # Rows whose columns do not parse (headers, blanks, bad dates) are
        # skipped wholesale. NOTE(review): bare except also hides real bugs.
        try:
            code = s.cell(row,0).value
            date_value = xldate_as_tuple(s.cell(row,1).value, wb.datemode)
            d = datetime(*date_value)
            physician = s.cell(row,2).value
            assistants = s.cell(row,3).value
        except:
            continue
        # Count each tokenized assistant name.
        for a in Tokenization(assistants):
            try:
                adict[a] += 1
            except:
                adict[a] = 1
    # Write the tallies to a fresh one-sheet workbook, skipping singletons.
    book = Workbook()
    sheet1 = book.add_sheet('Sheet 1')
    row = 0
    for k in sorted(adict.keys()):
        if adict[k] == 1:
            continue
        sheet1.write(row,0,k)
        sheet1.write(row,1,adict[k])
        row+=1
        print (' %s,%s' % (k, adict[k]))
    print ('\n\n\n')
    out = '%s/%s.xls' % (os.path.dirname(FILENAME), s.name)
    book.save(out)
# Process every sheet of the source workbook.
wb = open_workbook(FILENAME)
for s in wb.sheets():
    Sum(s)
    # exit()
# exit()

80
ntuh/categories.txt Executable file
View file

@ -0,0 +1,80 @@
手術
Brain tumor
Glioma
High grade
Low grade
Meningioma
Pituitary tumor
Acoustic neuroma
brain metastasis
Others
Vascular
Aneurysm (Microsurgery)
AVM (Excision)
EC-IC bypass
Endarterectomy
Cavernoma (Excision)
Dural AVF (Microsurgery)
CVA
Spontaneous ICH
Decompression for Infarction
Spine
高位頸椎(C1,2)手術
Tumor
Malignant
Benign
HIVD
Cervical
Lumber
Stenosis
Cervical
Lumber
Other Instrumentation
Cervical
Lumber
Others
Head injury
EDH
Acute SDH
Chronic SDH
Traumatic ICH
Cranioplasty
Infection (abscess,empyema)
Aspiration
Drainage
Excision
VP Shunt
Functional
DBS之顱內晶片
電池置入
脊椎刺激(SCS)療法
脊椎腔內Baclofen (ITB)療法/脊椎腔內Mophine (ITM)療法)
功能性腦部燒灼術(含(1)動作障礙(2)疼痛治療等)
trigeminal neuralgia, Hemifacial spasm
MVD
percutaneous rhizotomy
Hyperhidrosis
Peripheral nerves
Carpal tunnel syndrome
PNS
Epilepsy surgery
Endovascular surgery
Aneurysym (Coiling)
AVM
Dural AVF
Carotid angioplasty / stent
Stereotactic Radiosurgery (SRS)
Vascular
AVM
Cavernoma
Dural AVF
Tumors
Malignant
Metastases
Primary
Benign
Meningiomas
Schwannomas
Pituitary
Trigeminal neuralgia
Others

6
ntuh/ck/0.py Executable file
View file

@ -0,0 +1,6 @@
#!/usr/bin/env python
# NOTE: Python 2 script (print-statement syntax).
from intra import *
# Fetch and print the height/weight record for one chart number.
print HeightWeight('D120264406')
#print ReportResult('D120264406')
0
ntuh/ck/__init__.py Executable file
View file

25
ntuh/ck/admin.py Executable file
View file

@ -0,0 +1,25 @@
from .models import *
from django.contrib import admin
class PatientAdmin(admin.ModelAdmin):
# fields = ['pub_date', 'question']
list_display = ('name', 'medical_records')
#admin.site.register(Patient, PatientAdmin)
class TreatmentAdmin(admin.ModelAdmin):
    """Changelist for Treatment: filterable by staff, completion date and billing."""
    # fields = ['pub_date', 'question']
    list_display = ('patient', 'icd9', 'oncologist', 'surgeon', 'date_completed', 'memo')
    list_filter = ['oncologist', 'surgeon', 'date_completed', 'accounting']
    # search_fields = ['date_started', 'date_completed']
    # Drill-down navigation by completion date.
    date_hierarchy = 'date_completed'
admin.site.register(Treatment, TreatmentAdmin)
class LesionAdmin(admin.ModelAdmin):
    """Changelist for Lesion, filterable by pathology."""
    list_display = ('treatment_id', 'sub_location_target_location', 'pathology')
    list_filter = ['pathology']
admin.site.register(Lesion, LesionAdmin)

20
ntuh/ck/forms.py Executable file
View file

@ -0,0 +1,20 @@
#coding=utf-8
#from django import newforms as forms
from django import forms
#from django.newforms import form_for_model
from django.forms import ModelForm
from models import *
#PatientForm = form_for_model(Patient)
class PatientForm(forms.Form):
    """Manual patient-lookup form: chart number, name, or id code.

    Field names mirror the keys expected by intra.Default_Dr.
    """
    ChartNo = forms.CharField()
    Name = forms.CharField()
    idcode = forms.CharField()
#TreatmentForm = form_for_model(Treatment)
class TreatmentForm(ModelForm):
    """ModelForm over Treatment.

    NOTE(review): Meta declares neither ``fields`` nor ``exclude``;
    newer Django versions require one of them -- confirm the Django
    version this targets.
    """
    class Meta:
        model = Treatment

965
ntuh/ck/intra.py Executable file
View file

@ -0,0 +1,965 @@
#!/usr/bin/python
# coding=utf-8
# 2010-09-16 move from project cyberknife
# NOTE(review): plaintext credential committed to source control --
# move to environment/configuration and rotate the password.
PASSWORD = 'n122119493'
from datetime import date
import mechanize
from urllib2 import urlopen
#from ClientForm import ParseResponse
import datetime
import hashlib
import re
import urllib
import urllib2
import pdb
import math
import pprint
pp = pprint.PrettyPrinter()
# Shared browser session reused by every scraper in this module.
# RobustFactory tolerates the intranet's malformed HTML.
br = mechanize.Browser(factory=mechanize.RobustFactory())
br.set_handle_robots(False)  # skip robots.txt for the intranet scrape
def xtrace(R):
    """Debug hook: drop into pdb.  The argument is ignored."""
    pdb.set_trace()
def remove_space(s):
    """Delete every ASCII space from *s*, then strip remaining edge whitespace.

    Non-space whitespace (tabs, newlines) survives in the interior but is
    trimmed from both ends.
    """
    without_spaces = ''.join(s.split(' '))
    return without_spaces.strip()
#print remove_space(' 123 ')
def minguo2ce(minguo):
    """Convert a Minguo (ROC calendar) date string to a datetime.date.

    Accepts "yy.mm.dd" or "yy/mm/dd"; digit groups may be space-padded
    (e.g. ' 75.01.25').  The year is offset by 1911 to get the CE year.
    Returns None when the string matches neither format.

    Refactor: the original duplicated the whole parse for each
    separator and used bare ``except``; the two patterns now share one
    loop and only ValueError (blank month/day) is caught.
    """
    for pattern in (r'(\d+)\.([ 0-9]{1,2})\.([ 0-9]{1,2})',
                    r'(\d+)/([ 0-9]{1,2})/([ 0-9]{1,2})'):
        s = re.search(pattern, minguo)
        if s:
            yy = int(s.group(1)) + 1911
            # Month/day fields may be blank spaces in the source data;
            # fall back to 1 rather than failing.
            try:
                mm = int(s.group(2))
            except ValueError:
                mm = 1
            try:
                dd = int(s.group(3))
            except ValueError:
                dd = 1
            return date(yy, mm, dd)
    return None
#print minguo2ce(' 75.01.25')
########## Old intra system
def Default_Dr(REQUEST):
    """Search patients on the old intranet by chart number, name or id code.

    REQUEST is a dict-like mapping; the first non-empty key among
    'ChartNo', 'Name' and 'idcode' selects the query sent to
    Default_Dr.asp.  Returns a list of dicts with keys name,
    medical_records, gender, birthday (datetime.date or None), address,
    phone and id_cards -- or "" when no usable key is present.
    """
    # Python 2 dict API (has_key); the legacy server expects Big5 names.
    if (REQUEST.has_key('ChartNo')) and REQUEST['ChartNo'] != "":
        values = { 'ChartNo' : REQUEST['ChartNo'] }
    elif (REQUEST.has_key('Name')) and REQUEST['Name'] != "":
        values = { 'Name' : REQUEST['Name'].decode('utf_8').encode('big5') }
    elif (REQUEST.has_key('idcode')) and REQUEST['idcode'] != "":
        values = { 'idcode' : REQUEST['idcode'] }
    else:
        return ""
    url = 'http://intra.mc.ntu.edu.tw/main/ChartNo/Default_Dr.asp'
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    the_page = response.read()
    the_page = the_page.decode('big5','ignore').encode('utf_8')
    # One result row spans eight cells; the odd-numbered groups capture
    # the filler between cells and are skipped below.
    pattern = ( '(?s)<td align="center">(.*?)</td>(.*?)'
        + '<td width="100"><font color="blue">(.*?)</font>(.*?)'
        + '<td align="center"><font color="blue">(.*?)</font>(.*?)'
        + '<td align="center">(.*?)</td>(.*?)'
        + '<td align="center">(.*?)</td>(.*?)'
        + '<td>(.*?)</td>(.*?)'
        + '<td>(.*?)</td>(.*?)'
        + '<td>(.*?)</td>' )
    matches = re.findall(pattern, the_page)
    result = []
    for match in matches:
        r = {}
        r['name'] = remove_space(match[2])
        r['medical_records'] = remove_space(match[4])
        r['gender'] = remove_space(match[6])
        # Birthday arrives in Minguo format (yy.mm.dd).
        r['birthday'] = minguo2ce(match[8])
        r['address'] = remove_space(match[10])
        r['phone'] = remove_space(match[12])
        r['id_cards'] = remove_space(match[14])
        result.append(r)
    return result
#print Default_Dr({ 'HIS' : '',
# 'firstname' : '',
# 'MIS' : 'N122119493' })
def percent_encoding(keys, REQUEST):
    """URL-encode the subset of *REQUEST* whose keys appear in *keys*.

    Keys missing from REQUEST are silently skipped.  Returns the
    application/x-www-form-urlencoded string (Python 2 urllib).
    """
    # 'key in REQUEST' replaces the unidiomatic __contains__ call.
    data = {}
    for key in keys:
        if key in REQUEST:
            data[key] = REQUEST[key]
    return urllib.urlencode(data)
#print percent_encoding(['HIS','MIS'],
# { 'HIS' : '3009684',
# 'firstname' : '',
# 'MIS' : 'N122119493' })
def CheckUser():
    """Re-authenticate against the legacy e-hospital intranet.

    NOTE(review): the user id and password are hard-coded -- move the
    credentials out of source control.
    """
    br.open("http://intra.mc.ntu.edu.tw/CheckUser_Ehospital.asp?myurl=default_Ehospital.asp")
    br.select_form(name="form1")
    br["uid"] = 'dtsurg08'
    br["pwd"] = 'x'
    response = br.submit() # submit current form
    return response.read()
def chinese2date(chinese):
    """Parse a date whose parts are separated by non-digit text.

    Matches <year><sep><month><sep><day> (e.g. '99年1月5日' or
    '2008-06-28') and returns a datetime.date, or None when the string
    does not contain such a pattern.
    """
    found = re.search(r'(\d+)(\D+)(\d{1,2})(\D+)(\d{1,2})', chinese)
    if found is None:
        return None
    year, month, day = (int(found.group(i)) for i in (1, 3, 5))
    return date(year, month, day)
def oncrt_query(ChartNo):
    """Query radiotherapy (ONC RT) courses for a chart number.

    Returns a list of dicts: site, way, radiation_energy,
    number_of_treatment, total_dose, start_date, end_date,
    the_number_of_treatment, remarks.

    Fix: the idle-timeout check used ``str.find`` as a boolean.
    ``find`` returns -1 (truthy) when the marker is absent, so the
    code re-logged in on every single call; compare against -1.
    """
    url = "http://intra.mc.ntu.edu.tw/DigReport/ONC_RT/oncrt_query.asp"
    response = br.open(url)
    # Re-login only when the session has actually timed out
    # (the marker text means "idle for over sixty minutes").
    if response.read().decode('big5','ignore').encode('utf_8').find('閒置時間超過六十分鐘') != -1:
        CheckUser()
        response = br.open(url)
    br.select_form(name="FrontPage_Form1")
    br["ChartNo"] = str(ChartNo)
    response = br.submit()
    body = response.read().decode('big5','ignore').encode('utf_8')
    # Strip HTML comments so the row regex cannot match inside them.
    htmlcomments = re.compile('\<![ \r\n\t]*(--([^\-]|[\r\n]|-[^\-])*--[ \r\n\t]*)\>')
    body = htmlcomments.sub('', body)
    # Nine cells per row; odd groups capture inter-cell whitespace.
    pattern = (
        '(?s)<tr align="center">(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td>(.*?)</td>(\s*?)'
        +'<td align="left">(.*?)</td>'
    )
    matches = re.findall(pattern, body)
    result = []
    for match in matches:
        r = {}
        r['site'] = remove_space(match[1])
        r['way'] = remove_space(match[3])
        r['radiation_energy'] = remove_space(match[5])
        r['number_of_treatment'] = remove_space(match[7])
        r['total_dose'] = remove_space(match[9])
        r['start_date'] = chinese2date(match[11])
        r['end_date'] = chinese2date(match[13])
        r['the_number_of_treatment'] = remove_space(match[15])
        r['remarks'] = remove_space(match[17])
        result.append(r)
    return result
def op_note_case(ChartNo):
    """List operation notes for a chart number.

    Returns a list of dicts: surgery_date_time (datetime.date or
    None), division, name_surgery, surgeon.

    Fix: the idle-timeout check used ``str.find`` truthily; -1
    (not found) is truthy, forcing a re-login on every call.
    """
    url = "http://intra.mc.ntu.edu.tw/DigReport/OpNote/case.asp"
    response = br.open(url)
    if response.read().decode('big5','ignore').encode('utf_8').find('閒置時間超過六十分鐘') != -1:
        CheckUser()
        response = br.open(url)
    br.select_form(name="FrontPage_Form1")
    br["ChartNo"] = str(ChartNo)
    response = br.submit()
    body = response.read().decode('big5','ignore').encode('utf_8')
    # Drop HTML comments before matching.
    htmlcomments = re.compile('\<![ \r\n\t]*(--([^\-]|[\r\n]|-[^\-])*--[ \r\n\t]*)\>')
    body = htmlcomments.sub('', body)
    pattern = (
        '(?s)<TD ALIGN="center"><A HREF="OpNote.asp\?ChartNo=([^\n\r]*)(\s*?)'
        +'([^\n\r]*?)(\s*?)'
        +'</A></TD>(\s*?)'
        +'<TD ALIGN="center">(\s*?)'
        +'([^\n\r]*?)(\s*?)'
        +'</TD>(\s*?)'
        +'<TD>(\s*?)'
        +'([^\n\r]*?)(\s*?)'
        +'</TD>(\s*?)'
        +'<TD>(\s*?)'
        +'([^\n\r]*?)(\s*?)'
        +'</TD>'
    )
    matches = re.findall(pattern, body)
    result = []
    for match in matches:
        r = {}
        # Dates arrive in Minguo (ROC) format.
        r['surgery_date_time'] = minguo2ce(match[2])
        r['division'] = remove_space(match[6])
        r['name_surgery'] = remove_space(match[10])
        r['surgeon'] = remove_space(match[14])
        result.append(r)
    return result
def path_exam(ChartNo):
    """List pathology exams for a chart number, fetching each report body.

    Returns a list of dicts: path_code, specimen_code,
    specimen_get_date, report_date, division, bed, report (the red
    summary lines of the full report, comma-joined).

    Fix: the idle-timeout check used ``str.find`` truthily; -1
    (not found) is truthy, forcing a re-login on every call.
    """
    url = "http://intra.mc.ntu.edu.tw/DigReport/Lab/PathExam.asp"
    response = br.open(url)
    if response.read().decode('big5','ignore').encode('utf_8').find('閒置時間超過六十分鐘') != -1:
        CheckUser()
        response = br.open(url)
    br.select_form(name="FrontPage_Form1")
    br["ChartNo"] = str(ChartNo)
    response = br.submit()
    body = response.read().decode('big5','ignore').encode('utf_8')
    htmlcomments = re.compile('\<![ \r\n\t]*(--([^\-]|[\r\n]|-[^\-])*--[ \r\n\t]*)\>')
    body = htmlcomments.sub('', body)
    # Each row links to PathReport_new.asp with all fields packed into
    # the query string; the trailing cells hold division/bed/dates.
    pattern = '''\
<TD ALIGN="center" WIDTH="50"><A HREF="PathReport(.*?).asp\?PathCode=(.*?)&amp;ChartNo=(.*?)&amp;ChineseName=(.*?)&amp;Sex=(.*?)&amp;Birthday=(.*?)&amp;SpecimenCode=(.*?)&amp;SpecimenGetDate=(.*?)&amp;ReportDate=(.*?)&amp;VSIdNo=(.*?)&amp;RIdNo=(.*?)&amp;ExamDr=(.*?)&amp;InCharge=(.*?)">(.*?)</A></TD>(\s*?)\
<TD ALIGN="center" WIDTH="80">(.*?)</TD>(\s*?)\
<TD>(.*?)</TD>(\s*?)\
<TD>(.*?)</TD>(\s*?)\
<TD>(.*?)</TD>(\s*?)\
<TD>(.*?)</TD>\
'''
    matches = re.findall(pattern, body)
    result = []
    for match in matches:
        r = {}
        r['path_code'] = remove_space(match[1])
        r['specimen_code'] = remove_space(match[6])
        r['specimen_get_date'] = minguo2ce(match[7])
        r['report_date'] = minguo2ce(match[8])
        r['division'] = remove_space(match[15])
        r['bed'] = remove_space(match[17])
        # Fetch the individual report and pull out the highlighted
        # (red) diagnosis lines.
        purl = 'http://intra.mc.ntu.edu.tw/DigReport/Lab/PathReport.asp?PathCode=%s' % r['path_code']
        presponse = br.open(purl)
        pbody = presponse.read().decode('big5','ignore').encode('utf_8')
        pbody = htmlcomments.sub('', pbody)
        ppattern = '<td colspan="5" bgcolor="#F8F0E0" width="534"><font color="red">(.*?)(<br>|</font>)'
        pmatches = re.findall(ppattern, pbody, re.DOTALL)
        report = []
        for pmatch in pmatches:
            report.append(remove_space(pmatch[0]))
        r['report'] = ','.join(report)
        result.append(r)
    return result
def XrayExam(ChartNo):
    """List CT and MRI reports for a chart number over the last 12 months.

    Returns a list of dicts (reportseqno, ChartNo, ChineseName, Sex,
    Birthday, ExamDate, accessno, ReferNo, status, LinkOrderName,
    Modality), one entry per report link.

    Fixes: the idle-timeout check used ``str.find`` truthily (-1 is
    truthy, so it re-logged in on every call), and a dead first
    assignment to ``url`` (the old intra URL) was removed.
    """
    url = 'http://portal.ntuh.gov.tw/DigReport/Xray/XrayExam.asp'
    response = br.open(url)
    if response.read().decode('big5','ignore').encode('utf_8').find('閒置時間超過六十分鐘') != -1:
        CheckUser()
        response = br.open(url)
    body = {}
    # Run the same query once per modality.
    br.select_form(name="FrontPage_Form1")
    br["ChartNo"] = str(ChartNo)
    br["durnum"] = ['12']
    br["reptype"] = ['CT']
    br["dattype"] = ['Y']
    response = br.submit()
    body['CT'] = response.read().decode('big5','ignore').encode('utf_8')
    br.select_form(name="FrontPage_Form1")
    br["ChartNo"] = str(ChartNo)
    br["durnum"] = ['12']
    br["reptype"] = ['MRI']
    br["dattype"] = ['Y']
    response = br.submit()
    body['MRI'] = response.read().decode('big5','ignore').encode('utf_8')
    # All report metadata is packed into the XrayReport.asp link.
    pattern="<a href='(Ximage/XrayReport.asp\\?reportseqno=(.*?)&amp;ChartNo=(.*?)&amp;ChineseName=(.*?)&amp;Sex=(.*?)&amp;Birthday=(.*?)&amp;ExamDate=(.*?)&amp;accessno=(.*?)&amp;ReferNo=(.*?)&amp;status=(.*?))'>(.*?)</a>"
    pattern = pattern.replace("'", "\\'")
    pattern = pattern.replace('&', '\\&')
    results = []
    for m in ['CT', 'MRI']:
        matches = re.findall(pattern, body[m])
        for match in matches:
            r = {}
            r['reportseqno'] = remove_space(match[0])
            r['ChartNo'] = remove_space(match[1])
            r['ChineseName'] = remove_space(match[2])
            r['Sex'] = remove_space(match[3])
            r['Birthday'] = remove_space(match[4])
            r['ExamDate'] = remove_space(match[5])
            r['accessno'] = remove_space(match[6])
            r['ReferNo'] = remove_space(match[7])
            r['status'] = remove_space(match[8])
            r['LinkOrderName'] = remove_space(match[9])
            r['Modality'] = m
            results.append(r)
            # Debug output of the matched link.
            pp.pprint(urllib2.unquote(match[0]))
    return results
########################################New portal systemn
def Login():
    """Log in to the new portal and return the SESSION token.

    The token is scraped from the OpenClinics redirect URL embedded in
    the post-login page.

    NOTE(review): the employee id and module-level PASSWORD are
    hard-coded -- move credentials to configuration.
    """
    br.open("http://portal.ntuhrs.ntuh.gov.tw/General/Login.aspx")
    br.select_form(name="Form1")
    # br["rdblQuickMenu"] = ['O']
    br["txtUserID"] = '004552'
    # The portal expects the MD5 hex digest of the password.
    br["txtPass"] = hashlib.md5(PASSWORD).hexdigest()
    # print br.possible_items("rdblQuickMenu")
    # print br.form
    response = br.submit() # submit current form
    pattern = "http://hisaw.ntuh.gov.tw/WebApplication/Clinics/OpenClinics.aspx\?SESSION=(\w*)"
    string = str(response.read())
    # print string
    matches = re.findall(pattern, string)
    return matches[0]
def HeightWeight(PersonID):
    """Fetch the patient's recorded height/weight from the portal.

    Returns {'Height': h, 'Weight': w, 'BSA': bsa} where BSA is the
    Mosteller body-surface area; h/w are the scraped strings (0 when
    the page has no row, in which case BSA is 0 too).
    """
    SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/QueryHeightWeightByPersonID.aspx?SESSION=%s&PersonID=%s" % (SESSION,PersonID)
    response = br.open(url)
    body = response.read()
    '''
    <span id="PatientHeightWeightGrid_ctl03_HeightLabel">171</span>
    </font></td><td align="left"><font color="#333333">
    <span id="PatientHeightWeightGrid_ctl03_HeightPerLabel"></span>
    </font></td><td align="left"><font color="#333333">
    <span id="PatientHeightWeightGrid_ctl03_WeightLabel">75</span>
    '''
    pattern = '''\
<span id="PatientHeightWeightGrid_ctl03_HeightLabel">(.*?)</span>(\s*?)\
</font></td><td align="left"><font color="#333333">(\s*?)\
<span id="PatientHeightWeightGrid_ctl03_HeightPerLabel">(.*?)</span>(\s*?)\
</font></td><td align="left"><font color="#333333">(\s*?)\
<span id="PatientHeightWeightGrid_ctl03_WeightLabel">(.*?)</span>\
'''
    matches = re.findall(pattern, body)
    # print matches[0]
    if matches:
        h = matches[0][0]
        w = matches[0][6]
    else:
        h = 0
        w = 0
    try:
        bsa = math.sqrt(float(h) * float(w) / 3600) #Mosteller formula
    except:
        bsa = 0
    return {'Height': h, 'Weight': w, 'BSA': bsa}
def ReportResult(PersonID):
    """Dump the report-result query page for *PersonID* to stdout.

    NOTE(review): everything after the first ``return`` is unreachable
    dead code; it also references ``ParseResponse``, whose import is
    commented out at the top of the module.
    """
    SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/ReportResultQuery.aspx?SESSION=%s&PersonID=%s" % (SESSION,PersonID)
    response = br.open(url)
    br.select_form(name="Form1")
    response = br.submit() # submit current form
    body = response.read()
    print body
    return
    # --- unreachable experimental code below ---
    response = urlopen(url)
    forms = ParseResponse(response, backwards_compat=False)
    form = forms[0]
    print form
    form.set_all_readonly(False)
    form["__EVENTTARGET"] = "LinkbuttonRadReport"
    # form.click() returns a urllib2.Request object
    # (see HTMLForm.click.__doc__ if you don't have urllib2)
    print urlopen(form.click()).read()
def icd_query(ChartNo):
    """Query the portal patient record by chart number.

    Returns the raw response HTML of the auto-shown record page.
    """
    Login()
    br.select_form(name="Form1")
    br["NTUHWeb1:QueryPersonIDByChartNo2:txbChartNoInput"] = str(ChartNo)
    br["NTUHWeb1:QueryPersonIDByChartNo2:AutoShowRecord"] = True
    response = br.submit() # submit current form
    return response.read()
################## os.getcwd()##############
def get_path():
    """Return the resolved directory containing the running script.

    Based on sys.argv[0]; symlinks are resolved via realpath.
    """
    import os
    import sys
    script_dir = os.path.dirname(sys.argv[0])
    return os.path.realpath(script_dir)
###############################
# Doctor name -> division mapping, (re)loaded from vs.csv on each query.
vs = {}
def unf_byDisDate(deptcode, StartDate, EndDate):
    """List discharges for a department between two Minguo-format dates.

    StartDate/EndDate are 'yyymmdd' strings (see formatDate).  Returns
    a list of dicts per discharge row; 'division' is resolved from the
    attending doctor via vs.csv, defaulting to 'Others'.

    Fixes: the vs.csv file handle was leaked (open() without close),
    and ``dict.has_key`` was used where ``in`` suffices.
    """
    import csv
    f = open(get_path()+"/vs.csv", "rb")
    try:
        for row in csv.reader(f):
            vs[row[1]] = row[0]
    finally:
        f.close()
    url = "http://intra.mc.ntu.edu.tw/main/Discharge/unf_byDisDate.asp"
    response = br.open(url)
    br.select_form(nr=0)
    br["deptcode"] = [deptcode]
    br["StartDate"] = StartDate
    br["EndDate"] = EndDate
    response = br.submit()
    body = response.read().decode('big5','ignore').encode('utf_8')
    pattern = """
<tr>\s*
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">
<p align="left">
(.*?)
</td>
<td align="center" bgcolor="#FFFFFF">
(.*?)
</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">
(.*?)
</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
<td align="center" bgcolor="#FFFFFF">(.*?)</td>
</tr>
"""
    # Escape quotes and turn literal newlines into \s* before matching.
    pattern = pattern.replace('"', '\\"')
    pattern = pattern.replace('\n', '\\s*')
    matches = re.findall(pattern, body)
    result = []
    for match in matches:
        r = {}
        r['no'] = remove_space(match[0])
        r['doctor_code'] = remove_space(match[1])
        r['doctor_name'] = remove_space(match[2])
        r['discharge_date'] = remove_space(match[3])
        r['patients_name'] = remove_space(match[4])
        r['medical_record_number'] = remove_space(match[5])
        r['account'] = remove_space(match[6])
        r['admission_date'] = remove_space(match[7])
        r['hospital_ bed'] = remove_space(match[8])
        r['category'] = remove_space(match[9])
        r['dr'] = remove_space(match[10])
        r['resident'] = remove_space(match[11])
        # Unknown doctors bucket to 'Others'.
        if r['dr'] in vs:
            r['division'] = vs[r['dr']]
        else:
            r['division'] = 'Others'
        result.append(r)
    return result
def formatDate(sDate):
    """Format a datetime.date as a Minguo (ROC) date string 'yyymmdd'.

    The CE year is converted to the ROC year (CE - 1911); the year is
    zero-padded to 3 digits and month/day to 2, e.g.
    date(2010, 1, 5) -> '0990105'.

    Rewrite: replaces the manual str()/len() padding (which only
    padded a 2-digit year) with a single format expression that pads
    1-digit ROC years consistently as well.
    """
    return '%03d%02d%02d' % (sDate.year - 1911, sDate.month, sDate.day)
def unf_sort(StartDay, EndDay):
    """Summarize SURG discharges between two day-offsets from today.

    StartDay/EndDay are integer offsets (may be negative).  Returns
    {'dr': ..., 'resident': ..., 'division': ...}, each a list of
    (name, count) tuples sorted by descending count.
    """
    StartDate = datetime.date.today() + datetime.timedelta(days=StartDay)
    EndDate = datetime.date.today() + datetime.timedelta(days=EndDay)
    result = unf_byDisDate('SURG', formatDate(StartDate), formatDate(EndDate))
    dr = []
    resident = []
    division = []
    for r in result:
        dr.append(r['dr'])
        resident.append(r['resident'])
        division.append(r['division'])
    # The count is doubled, so we div it by 2
    dr_freq = [(a, dr.count(a)/2) for a in set(dr)]
    dr_sort = sorted(dr_freq, key=lambda x: -x[1])
    resident_freq = [(a, resident.count(a)/2) for a in set(resident)]
    resident_sort = sorted(resident_freq, key=lambda x: -x[1])
    division_freq = [(a, division.count(a)/2) for a in set(division)]
    division_sort = sorted(division_freq, key=lambda x: -x[1])
    return {'dr': dr_sort,
            'resident': resident_sort,
            'division': division_sort,
            }
def PACSImageShowList(PersonID, SESSION=None):
    """Show list of PACS images for *PersonID*.

    Returns a list of dicts: PatChartNo, RequestSheetNo, ExamDate,
    LinkOrderName, Modality, VerifiedStateString.

    Fix: the original declared ``SESSION = Login()`` as the default,
    which performs the network login once at module import time and
    then reuses that (possibly expired) session for every call.  The
    default is now None and a fresh session is obtained per call.
    """
    if SESSION is None:
        SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/PACSImageShowList.aspx?SESSION=%s&PatClass=I&PersonID=%s&Hosp=T0&EMRPop=Y" % (SESSION,PersonID)
    response = br.open(url)
    body = response.read()
    # Each table row carries the six fields below in fixed order.
    pattern="""
<span id="(.*?)_PatChartNo">(.*?)</span>
</td><td align="left">
<span id="(.*?)_RequestSheetNo">(.*?)</span>
</td><td align="left">
<span id="(.*?)_ExamDate">(.*?)</span>
</td><td align="left">
<a onclick=(.*?)><font color="Black">(.*?)</font></a>
</td><td align="left">
<span id="(.*?)_Modality">(.*?)</span>
</td><td align="left">
<span id="(.*?)_VerifiedStateString">(.*?)</span>
"""
    pattern = pattern.replace('"', '\\"')
    pattern = pattern.replace('\n', '\\s*')
    matches = re.findall(pattern, body)
    results = []
    for match in matches:
        r = {}
        r['PatChartNo'] = remove_space(match[1])
        r['RequestSheetNo'] = remove_space(match[3])
        r['ExamDate'] = remove_space(match[5])
        r['LinkOrderName'] = remove_space(match[7])
        r['Modality'] = remove_space(match[9])
        r['VerifiedStateString'] = remove_space(match[11])
        results.append(r)
    return results
#def PatientMedicalRecordListQuery(PersonID, SESSION = Login()):
def PatientMedicalRecordListQuery(Chart, SESSION=None):
    """Query a patient's hospital visits by chart number.

    Returns a dict with keys:
      'Dead'   -- datetime of death, or None
      'In'     -- list of inpatient admissions
      'Emer'   -- list of emergency visits
      'OutPat' -- list of outpatient visits

    Fixes: the default ``SESSION = Login()`` was evaluated once at
    module import time (network login at import, stale session reuse);
    the bare ``except:`` clauses are narrowed to the exceptions the
    guarded code can actually raise.
    """
    if SESSION is None:
        SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/PatientMedicalRecordListQuery.aspx?QueryBySelf=N&SESSION=%s" % SESSION
    br.open(url)
    br.select_form(name="Form1")
    br["NTUHWeb1$ChartInputTextBox"] = Chart
    response = br.submit()
    body = response.read()
    result = {}
    # Date of death, if flagged in the patient header span.
    pattern = '<span id="NTUHWeb1_PatAccountListRecord1_PatBasicDescription">(.*?)\\((.*?)\\)(.*?)</span>'
    matches = re.findall(pattern, body)
    try:
        match = matches[0]
        search = re.search('..../../..', match[2])
        result['Dead'] = datetime.datetime.strptime(search.group(0),'%Y/%m/%d')
    except (IndexError, AttributeError, ValueError):
        result['Dead'] = None
    # Inpatient admissions.
    pattern ='''
<span id=".*?_InLabelHospName">(.*?)</span>
.*?
<span id=".*?_InLabelDeptName">(.*?)</span>
.*?
<span id=".*?_InLabelInDate">(.*?)</span>
.*?
<span id=".*?_InLabelOutDate">(.*?)</span>
.*?
<span id=".*?_InLabelWardName">(.*?)</span>
.*?
<span id=".*?_InLabelRoomName">(.*?)</span>
.*?
<span id=".*?_InLabelBedName">(.*?)</span>
.*?
<span id=".*?_InLabelMainDrName">(.*?)</span>
.*?
<span id=".*?_InLabelMainDiagnosisName">(.*?)</span>
.*?
<span id=".*?_InLabelStatusName">(.*?)</span>
'''
    pattern = pattern.replace('"', '\\"')
    pattern = pattern.replace('\n', '\\s*?')
    matches = re.findall(pattern, body)
    In = []
    for match in matches:
        r = {}
        r['HospName'] = remove_space(match[0])
        r['DeptName'] = remove_space(match[1])
        r['InDate'] = datetime.datetime.strptime(remove_space(match[2]),'%Y/%m/%d')
        try:
            # OutDate is blank while the patient is still admitted.
            r['OutDate'] = datetime.datetime.strptime(remove_space(match[3]),'%Y/%m/%d')
        except ValueError:
            r['OutDate'] = None
        r['WardName'] = remove_space(match[4])
        r['RoomName'] = remove_space(match[5])
        r['BedName'] = remove_space(match[6])
        r['MainDrName'] = remove_space(match[7])
        r['MainDiagnosisName'] = remove_space(match[8])
        r['StatusName'] = remove_space(match[9])
        In.append(r)
    result['In'] = In
    # Emergency visits.
    pattern ='''
<span id=".*?_LabelEmerHospName">(.*?)</span>
.*?
<span id=".*?_LabelEmerDeptName">(.*?)</span>
.*?
<span id=".*?_LabelEmerComeClinicDate">(.*?)</span>
.*?
<span id=".*?_LabelEmerDischargeDate">(.*?)</span>
.*?
<span id=".*?_LabelEmerMainDrName">(.*?)</span>
.*?
<span id=".*?_LabelEmerMainDiagnosisName">(.*?)</span>
.*?
<span id=".*?_LabelEmerStatusName">(.*?)</span>
.*?
<span id=".*?_LabelEmerTempBedID">(.*?)</span>
'''
    pattern = pattern.replace('"', '\\"')
    pattern = pattern.replace('\n', '\\s*?')
    matches = re.findall(pattern, body)
    Emer = []
    for match in matches:
        r = {}
        r['HospName'] = remove_space(match[0])
        r['DeptName'] = remove_space(match[1])
        r['ComeClinicDate'] = datetime.datetime.strptime(remove_space(match[2]),'%Y/%m/%d')
        try:
            r['DischargeDate'] = datetime.datetime.strptime(remove_space(match[3]),'%Y/%m/%d')
        except ValueError:
            r['DischargeDate'] = None
        r['MainDrName'] = remove_space(match[4])
        r['MainDiagnosisName'] = remove_space(match[5])
        r['StatusName'] = remove_space(match[6])
        r['TempBedID'] = remove_space(match[7])
        Emer.append(r)
    result['Emer'] = Emer
    # Outpatient visits.
    pattern ='''
<span id=".*?_LabelHospName">(.*?)</span>
.*?
<span id=".*?_LabelDeptName">(.*?)</span>
.*?
<span id=".*?_LabelComeClinicDate">(.*?)</span>
.*?
<span id=".*?_LabelSpecialCureName">(.*?)</span>
.*?
<span id=".*?_LabelMainDrName">(.*?)</span>
.*?
<span id=".*?_LabelMainDiagnosisName">(.*?)</span>
.*?
<span id=".*?_LabelAccountStatusName">(.*?)</span>
'''
    pattern = pattern.replace('"', '\\"')
    pattern = pattern.replace('\n', '\\s*?')
    matches = re.findall(pattern, body)
    OutPat = []
    for match in matches:
        r = {}
        r['HospName'] = remove_space(match[0])
        r['DeptName'] = remove_space(match[1])
        r['ComeClinicDate'] = datetime.datetime.strptime(remove_space(match[2]),'%Y/%m/%d')
        r['SpecialCureName'] = remove_space(match[3])
        r['MainDrName'] = remove_space(match[4])
        r['MainDiagnosisName'] = remove_space(match[5])
        r['AccountStatusName'] = remove_space(match[6])
        OutPat.append(r)
    result['OutPat'] = OutPat
    return result
def doPostBack(form, eventTarget, eventArgument):
    """Emulate ASP.NET __doPostBack on a mechanize form.

    .NET pages normally inject __EVENTTARGET/__EVENTARGUMENT via
    client-side javascript, so mechanize never sees the controls;
    create them as hidden inputs and fill in the requested values.
    """
    form.new_control('hidden','__EVENTTARGET',attrs = dict(name='__EVENTTARGET'))
    form.new_control('hidden','__EVENTARGUMENT',attrs = dict(name='__EVENTARGUMENT'))
    # Allow writing to the (normally read-only) hidden controls.
    form.set_all_readonly(False)
    form["__EVENTTARGET"] = eventTarget
    form["__EVENTARGUMENT"] = eventArgument
def operationnotelist(Chart, SESSION=None):
    """Return (KeyCodeList, KeyNameList) for a patient's operation notes.

    The values are the pipe-separated hidden-field strings of the
    self-submitting form the portal emits.

    Fix: the default ``SESSION = Login()`` was evaluated once at module
    import time (network login at import, stale session reuse); the
    default is now None with a per-call login.
    """
    if SESSION is None:
        SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/PatientMedicalRecordListQuery.aspx?QueryBySelf=N&SESSION=%s" % SESSION
    br.open(url)
    br.select_form(name="Form1")
    br["NTUHWeb1$ChartInputTextBox"] = Chart
    response = br.submit('NTUHWeb1$ButtonQuery')
    br.select_form(name="Form1")
    response = br.submit("NTUHWeb1$PatAccountListRecord1$ShowOperationList")
    body = response.read()
    # Pull the two hidden fields out of the embedded form.
    pattern = 'name="KeyCodeList" value=(.*?)><input type=hidden name="KeyNameList" value=(.*?)>'
    matches=re.findall(pattern, body)
    return (matches[0][0], matches[0][1])
def ShowOperationNote(KeyCodeList, KeyNameList, SESSION=None):
    """Fetch and return the first operation-note report <div>.

    KeyCodeList/KeyNameList come from operationnotelist().

    Fix: the default ``SESSION = Login()`` was evaluated once at module
    import time; the default is now None with a per-call login.
    """
    if SESSION is None:
        SESSION = Login()
    url = 'http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/SimpleInfoShowUsingPlaceHolder.aspx?SESSION=%s' % SESSION
    data = {
        'KeyCodeList': KeyCodeList,
        'KeyNameList': KeyNameList,
        'Func' : 'OPNoteList',
        }
    response = br.open(url, urllib.urlencode(data))
    body = response.read()
    pattern ='(<div class="reportQuery">.*?</div>)\\s*?</td>'
    matches=re.findall(pattern, body, re.DOTALL)
    return matches[0]
def dischargenotelist(Chart, SESSION=None):
    """Collect (KeyCodeList, KeyNameList, AccountIDSE) tuples for all
    discharge notes of a patient.

    Fix: the default ``SESSION = Login()`` was evaluated once at module
    import time; the default is now None with a per-call login.
    """
    if SESSION is None:
        SESSION = Login()
    url = "http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/PatientMedicalRecordListQuery.aspx?QueryBySelf=N&SESSION=%s" % SESSION
    br.open(url)
    br.select_form(name="Form1")
    br["NTUHWeb1$ChartInputTextBox"] = Chart
    response = br.submit('NTUHWeb1$ButtonQuery')
    # One ShowDischargeNote button per admission record.
    Notes = re.findall('NTUHWeb1\$.*?ShowDischargeNote',
                       response.read())
    pattern = 'name="KeyCodeList" value=(.*?)><input type=hidden name="KeyNameList" value=(.*?)><input type=hidden name="AccountIDSE" value=(.*?)>'
    key = []
    for Note in Notes:
        br.select_form(name="Form1")
        response = br.submit(Note)
        body = response.read()
        matches=re.findall(pattern, body)
        for match in matches:
            key.append(match)
    return key
def ShowDischargeNote(KeyCodeList, KeyNameList, AccountIDSE, SESSION=None):
    """Fetch and return the first discharge-summary report <div>.

    Arguments come from dischargenotelist().

    Fix: the default ``SESSION = Login()`` was evaluated once at module
    import time; the default is now None with a per-call login.
    """
    if SESSION is None:
        SESSION = Login()
    url = 'http://ihisaw.ntuh.gov.tw/WebApplication/OtherIndependentProj/PatientBasicInfoEdit/SimpleInfoShowUsingPlaceHolder.aspx?SESSION=%s' % SESSION
    data = {
        'KeyCodeList': KeyCodeList,
        'KeyNameList': KeyNameList,
        'AccountIDSE': AccountIDSE,
        'Func' : 'DischargeSummary',
        }
    response = br.open(url, urllib.urlencode(data))
    body = response.read()
    pattern ='(<div class="reportQuery">.*?</div>)\\s*?</td>'
    matches=re.findall(pattern, body, re.DOTALL)
    return matches[0]
if __name__ == "__main__":
    # Manual smoke tests (intranet network access required).
    #PatientMedicalRecordListQuery('A101116124') # deceased patient
    #PatientMedicalRecordListQuery('R100260467')
    #PatientMedicalRecordListQuery('L200772263') # deceased patient
    #pp.pprint(PatientMedicalRecordListQuery('4582056'))
    XrayExam('5621920')
    pass

365
ntuh/ck/models.py Executable file
View file

@ -0,0 +1,365 @@
# coding=utf-8
from django.db import models
# Create your models here.
class ICD9Diag(models.Model):
    """ICD-9 diagnosis; the code is stored without the embedded dot."""
    code = models.CharField(max_length=5, primary_key=True)
    desc = models.CharField(max_length=50)
    def __unicode__(self):
        # Re-insert the dot after the first three digits for display.
        return "%s.%s %s" % (self.code[0:3], self.code[3:], self.desc)
class Activity(models.Model):
    """An activity/status label, referenced by Treatment.input/output."""
    title = models.CharField(max_length=200)
    def __unicode__(self):
        return self.title
    class Admin:
        pass
class Patient(models.Model):
    """Core patient demographics record."""
    GENDER_CHOICES = (
        (1, 'Male'),
        (2, 'Female'),
    )
    # STATUS_CHOICES = (
    # ( 0, 'Male'),
    # (10, 'Female'),
    # )
    name = models.CharField(max_length=200, verbose_name='姓名')
    # Hospital chart number; unique per patient.
    medical_records = models.CharField(max_length=200, unique=True)
    gender = models.IntegerField(choices=GENDER_CHOICES)
    birthday = models.DateField()
    address = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
    # Id card number; unique per patient.
    id_cards = models.CharField(max_length=200, unique=True)
    memo = models.CharField(max_length=200, blank=True, null=True)
    # Date of death, when known.
    dead = models.DateField(blank=True, null=True)
    height = models.DecimalField(max_digits=4, decimal_places=1, null=True)
    weight = models.DecimalField(max_digits=6, decimal_places=3, null=True)
    native = models.CharField(max_length=200, blank=True, null=True)
    past_and_family_history = models.TextField(blank=True, null=True)
    # last_followup = models.DateField(blank=True, null=True)
    # next_followup = models.DateField(blank=True, null=True)
    timestamp = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.name
        # return self.name+' '+str(self.medical_records)
    class Admin:
        pass
    def get_absolute_url(self):
        return "/patient/detail/%i/" % self.id
class Oncologist(models.Model):
    """An oncologist selectable on a Treatment."""
    name = models.CharField(max_length=200)
    def __unicode__(self):
        return self.name
class Surgeon(models.Model):
    """A surgeon selectable on a Treatment."""
    name = models.CharField(max_length=200)
    def __unicode__(self):
        return self.name
class DiseaseStage(models.Model):
    """A disease-stage label, referenced by Treatment and Pathology."""
    stage = models.CharField(max_length=200)
    def __unicode__(self):
        return self.stage
    class Admin:
        pass
class PrimaryTumorSite(models.Model):
    """A primary-tumor-site label, referenced by Treatment."""
    # id = models.IntegerField('ID', primary_key=True)
    site = models.CharField(max_length=200)
    def __unicode__(self):
        return self.site
    class Admin:
        pass
# Billing categories shared by Treatment.accounting and Price.identity
# (健保 = NHI-covered, 自費 = self-paid, 內含 = included).
ACCOUNTING = (
    (10, '健保'),
    (20, '自費'),
    (30, '內含'),
)
class Treatment(models.Model):
    """One radiosurgery treatment course for a patient."""
    # Karnofsky performance-status scale, stored as the percent value.
    KARNOFSKY_SCORING = (
        (100, '100% - normal, no complaints, no signs of disease'),
        ( 90, ' 90% - capable of normal activity, few symptoms or signs of disease'),
        ( 80, ' 80% - normal activity with some difficulty, some symptoms or signs'),
        ( 70, ' 70% - caring for self, not capable of normal activity or work'),
        ( 60, ' 60% - requiring some help, can take care of most personal requirements'),
        ( 50, ' 50% - requires help often, requires frequent medical care'),
        ( 40, ' 40% - disabled, requires special care and help'),
        ( 30, ' 30% - severely disabled, hospital admission indicated but no risk of death'),
        ( 20, ' 20% - very ill, urgently requiring admission, requires supportive measures or treatment'),
        ( 10, ' 10% - moribund, rapidly progressive fatal disease processes'),
        ( 0, ' 0% - death'),
    )
    # Image-guidance tracking modes.
    IMAGE_GUIDANCE = (
        (100, '6D Skull'),
        (200, 'Xsight-Spine'),
        (210, 'Xsight-Lung'),
        (300, 'Fiducial'),
        (400, 'Synchrony'),
    )
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    bed = models.CharField(max_length=200, blank=True, null=True, verbose_name='病床號')
    icd9 = models.ForeignKey(ICD9Diag, blank=True, null=True, verbose_name='ICD9診斷', on_delete=models.SET_NULL)
    other_diagnosis = models.CharField(max_length=200, blank=True, null=True, verbose_name='其他診斷')
    tracking_mode = models.IntegerField(choices=IMAGE_GUIDANCE, blank=True, null=True)
    referral = models.CharField(max_length=200, blank=True, null=True, verbose_name='轉介醫師')
    oncologist = models.ForeignKey(Oncologist, blank=True, null=True, on_delete=models.SET_NULL)
    surgeon = models.ForeignKey(Surgeon, blank=True, null=True, on_delete=models.SET_NULL)
    date_started = models.DateField(blank=True, null=True)
    date_completed = models.DateField(blank=True, null=True)
    # Billing category; choices shared with Price.identity.
    accounting = models.IntegerField(choices=ACCOUNTING, blank=True, null=True, verbose_name='記帳別')
    karnofsky_score = models.IntegerField(choices=KARNOFSKY_SCORING, blank=True, null=True)
    disease_stage = models.ForeignKey(DiseaseStage, blank=True, null=True, on_delete=models.SET_NULL)
    primary_tumor_site = models.ForeignKey(PrimaryTumorSite, blank=True, null=True, on_delete=models.SET_NULL)
    # Patient status at start (input) and end (output) of the course.
    input = models.ForeignKey(Activity, related_name='input', blank=True, null=True, verbose_name='', on_delete=models.SET_NULL)
    output = models.ForeignKey(Activity, related_name='output', blank=True, null=True, verbose_name='', on_delete=models.SET_NULL)
    complications = models.CharField(max_length=200, blank=True, null=True, verbose_name='併發症')
    chief_complaint = models.TextField(blank=True, null=True)
    memo = models.CharField(max_length=200, blank=True, null=True)
    timestamp = models.DateTimeField(auto_now=True)
    class Admin:
        pass
    def get_absolute_url(self):
        return "/treatment/detail/%i/" % self.id
class Price(models.Model):
    """Billable price item, keyed by billing category (``identity``)."""
    code = models.CharField(max_length=200, blank=True, null=True)  # charge code (nullable!)
    identity = models.IntegerField(choices=ACCOUNTING)  # billing category
    name = models.CharField(max_length=200)
    unit = models.CharField(max_length=200, blank=True, null=True)
    address = models.IntegerField(blank=True, null=True)

    class Admin:
        pass

    def __unicode__(self):
        # FIX: `code` is nullable; the original `self.code + '-' + ...`
        # raised TypeError for rows with code=None.  Fall back to ''.
        return '%s-%s-%s' % (self.code or '', self.get_identity_display(), self.name)

    # The file already uses Django's mandatory on_delete kwargs; under
    # Python 3 / Django 2 the framework calls __str__, not __unicode__,
    # so alias it to keep labels working.
    __str__ = __unicode__
class VEVENT(models.Model):
    """Scheduling event for a Treatment, modelled after iCalendar (RFC 5545).

    Field names deliberately mirror iCalendar property names (DTSTAMP,
    DTSTART, SUMMARY, ...), hence the non-PEP8 capitalisation.
    """
    # iCalendar
    DTSTAMP = models.DateTimeField(auto_now=True)
    DTSTART = models.DateTimeField()
    # FIX: fields below accepted empty form input (blank=True) but their DB
    # columns were NOT NULL (no null=True), so saving an empty value raised
    # an IntegrityError.  null=True added — requires a schema migration.
    # NOTE(review): DTEND is a TimeField while DTSTART is a DateTimeField;
    # looks intended for same-day events — confirm.
    DTEND = models.TimeField(blank=True, null=True)
    DURATION = models.TimeField()
    SUMMARY = models.CharField(max_length=200, blank=True, null=True)
    CLASS = models.CharField(max_length=200, blank=True, null=True)
    CATEGORIES = models.CharField(max_length=200, blank=True, null=True)
    TRANSP = models.CharField(max_length=200, blank=True, null=True)
    RRULE = models.CharField(max_length=200, blank=True, null=True)
    DESCRIPTION = models.CharField(max_length=200, blank=True, null=True)
    # Event kind: fiducial placement, fixation device, imaging, or treatment.
    MODE_CHOICES = (
        (110, 'Fiducial'),
        (200, '固定器'),
        (210, 'CT'),
        (220, 'MRI'),
        (230, 'Angio'),
        (310, '治療'),
    )
    treatment = models.ForeignKey(Treatment, on_delete=models.CASCADE)
    mode = models.IntegerField(choices=MODE_CHOICES)
    price = models.ForeignKey(Price, blank=True, null=True, on_delete=models.SET_NULL)
    break_frequency = models.IntegerField(blank=True, null=True)
    system_err = models.IntegerField(blank=True, null=True)
    shift = models.IntegerField(blank=True, null=True)
    cone = models.IntegerField(blank=True, null=True)
    path = models.IntegerField(blank=True, null=True)

    def get_absolute_url(self):
        # Events are displayed on their parent treatment's detail page.
        return "/treatment/detail/%i/" % self.treatment.id
class TargetLocation(models.Model):
    """Anatomical target region (top level of the location hierarchy)."""
    location = models.CharField(max_length=200)

    class Admin:
        pass

    def __unicode__(self):
        # Label rows by the location name.
        return self.location
class Pathology(models.Model):
    """Pathology diagnosis, linked to its disease-staging system."""
    pathology = models.CharField(max_length=200, unique=True)
    stage = models.ForeignKey(DiseaseStage, on_delete=models.CASCADE)

    class Admin:
        pass

    def __unicode__(self):
        # Display the diagnosis text itself.
        return self.pathology
class SubLocation(models.Model):
    """Finer-grained site beneath a TargetLocation, grouped for display."""
    target_location = models.ForeignKey(TargetLocation, on_delete=models.CASCADE)
    group = models.IntegerField()
    sub_location = models.CharField(max_length=200)
    pathology = models.ManyToManyField(Pathology)

    class Admin:
        pass

    def __unicode__(self):
        # Show only the leaf name; the parent is reachable via
        # target_location when a fully-qualified label is needed.
        return self.sub_location
class Lesion(models.Model):
    """A single treated lesion/target within a Treatment plan."""
    treatment = models.ForeignKey(Treatment, on_delete=models.CASCADE)
    sub_location = models.ForeignKey(SubLocation, on_delete=models.CASCADE)
    pathology = models.ForeignKey(Pathology, on_delete=models.CASCADE)
    dimensions = models.CharField(max_length=200)
    volume = models.DecimalField(max_digits=9, decimal_places=2)
    plan_name = models.CharField(max_length=200, blank=True, null=True)
    collimator = models.CharField(max_length=200)
    path_no = models.IntegerField()
    beam_no = models.IntegerField()
    mu_max = models.DecimalField(max_digits=9, decimal_places=2)
    mu_min = models.DecimalField(max_digits=9, decimal_places=2)
    dose = models.IntegerField()
    fractions = models.IntegerField()
    iso_dose_curve = models.IntegerField()
    dmin = models.DecimalField(max_digits=9, decimal_places=2)
    dmax = models.DecimalField(max_digits=9, decimal_places=2)
    coverage = models.DecimalField(max_digits=9, decimal_places=2)
    ci = models.DecimalField(max_digits=9, decimal_places=2)
    nci = models.DecimalField(max_digits=9, decimal_places=2)
    start_date = models.DateField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    memo = models.CharField(max_length=200, blank=True, null=True)

    def get_absolute_url(self):
        """Lesions are shown on their parent treatment's detail page."""
        return "/treatment/detail/%i/" % self.treatment.id

    def sub_location_target_location(self):
        # Convenience accessor (e.g. for admin list displays).
        return self.sub_location.target_location

    def treatment_id(self):
        # BUG FIX: the original `return treatment.id` referenced an undefined
        # global and raised NameError when called as a method.
        # NOTE(review): on instances Django's FK also materialises a
        # `treatment_id` attribute which shadows this method — consider
        # removing the method entirely.
        return self.treatment.id
class PriorTreatment(models.Model):
    """Treatment a patient received before (or concurrent with) radiosurgery."""

    TREATMENT_CHOICES = (
        (1, 'Surgery'),
        (2, 'Biopsy'),
        (3, 'RT'),
        (4, 'Radiosurgery'),
        (5, 'Chemotherapy'),
    )
    PERIOD_CHOICES = (
        (1, 'Before'),
        (2, 'Concurrent'),
        (3, 'None'),
    )

    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    date = models.DateField()
    treatment = models.IntegerField(choices=TREATMENT_CHOICES)
    period = models.IntegerField(choices=PERIOD_CHOICES, blank=True, null=True)
    dose = models.IntegerField(blank=True, null=True)
    memo = models.CharField(max_length=200, blank=True, null=True)

    def get_absolute_url(self):
        # Prior treatments are listed on the patient's detail page.
        return "/patient/detail/%i/" % self.patient.id
class PathExam(models.Model):
    """Imported pathology examination report for a patient."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    path_code = models.CharField(max_length=200, unique=True, verbose_name='病理號')  # pathology accession no. (unique)
    specimen_code = models.CharField(max_length=200, verbose_name='檢體')  # specimen identifier
    specimen_get_date = models.DateField(verbose_name='收件日')  # date specimen received
    report_date = models.DateField(verbose_name='報告日')  # date report issued
    division = models.CharField(max_length=200, verbose_name='科別')  # requesting division
    bed = models.CharField(max_length=200, blank=True, null=True, verbose_name='病床')  # bed number
    report = models.TextField(blank=True, null=True, verbose_name='檢查報告')  # full report text
class Followup(models.Model):
    """Dated free-text follow-up note for a patient."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    # Entered manually rather than auto_now_add, so historical notes can be
    # back-dated.
    date = models.DateField()
    memo = models.CharField(max_length=200, blank=True, null=True)

    def get_absolute_url(self):
        # Follow-ups appear on the patient's detail page.
        return "/patient/detail/%i/" % self.patient.id
class PACSImage(models.Model):
    """Radiology exam entry mirrored from PACS for a patient."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    PatChartNo = models.CharField(max_length=200, verbose_name='病歷號')  # chart number
    RequestSheetNo = models.CharField(max_length=200, verbose_name='單號')  # request/order number
    ExamDate = models.DateField( verbose_name='檢查日')  # exam date
    LinkOrderName = models.CharField(max_length=200, verbose_name='檢查名稱')  # exam name
    Modality = models.CharField(max_length=200, verbose_name='儀器')  # imaging modality
    VerifiedStateString = models.CharField(max_length=200, verbose_name='狀態')  # report state
    # Local triage state: 0 pending, 10 data entered, 15 confirmed,
    # 20 not applicable.
    SAVED_CHOICES = (
        (0 , '待處理'),
        (10, '有輸入'),
        (15, '已確認'),
        (20, '不適用'),
    )
    # Was a BooleanField; widened to an integer state machine.
    # Saved = models.BooleanField()
    Saved = models.IntegerField(choices=SAVED_CHOICES, verbose_name='保存')
class LesionFollow(models.Model):
    """Follow-up measurement of a treated lesion at a later date."""
    # NOTE(review): this field shadows the Lesion class name inside the class
    # body.  It works because the RHS evaluates Lesion before the assignment
    # binds, but it is fragile — consider renaming to `lesion` (schema change).
    Lesion = models.ForeignKey(Lesion, on_delete=models.CASCADE)
    Date = models.DateField(null=False, verbose_name='追蹤日期')  # follow-up date
    Volume = models.FloatField(null=True, verbose_name='體積(mm3)')  # volume, mm^3
    A = models.FloatField(null=True, verbose_name='長(mm)')  # length, mm
    B = models.FloatField(null=True, verbose_name='寬(mm)')  # width, mm
    C = models.FloatField(null=True, verbose_name='高(mm)')  # height, mm
    Memo = models.CharField(max_length=200, blank=True, null=True)
class MedicalRecord(models.Model):
    """Admission/ER/outpatient record mirrored from the hospital system."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    Record = models.CharField(max_length=200, null=True) # record type: inpatient/ER/outpatient (住急門)
    HospName = models.CharField(max_length=200, null=True)  # hospital name
    DeptName = models.CharField(max_length=200, null=True, verbose_name='')  # department
    InDate = models.DateField( null=False, verbose_name='')  # admission/visit date
    OutDate = models.DateField( null=True, verbose_name='')  # discharge date (null while admitted)
    WardName = models.CharField(max_length=200, null=True, verbose_name='')
    RoomName = models.CharField(max_length=200, null=True, verbose_name='')
    BedName = models.CharField(max_length=200, null=True, verbose_name='')
    MainDrName = models.CharField(max_length=200, null=True, verbose_name='主治')  # attending physician
    MainDiagnosisName = models.CharField(max_length=200, null=True, verbose_name='診斷')  # main diagnosis
    StatusName = models.CharField(max_length=200, null=True, verbose_name='狀態')  # record status
    SpecialCureName = models.CharField(max_length=200, null=True, verbose_name='行為')  # special procedure

2071
ntuh/ck/selenium.py Executable file

File diff suppressed because it is too large Load diff

35
ntuh/ck/stest.py Executable file
View file

@ -0,0 +1,35 @@
from selenium import selenium
import unittest, time, re

# Recorded Selenium RC (selenium 1.x) smoke test: log into the hospital
# portal and open a patient's operation list.
# NOTE(review): Selenium RC is long deprecated; this only runs against a
# local RC server on port 4444.
# SECURITY(review): a staff user ID and password are hard-coded below —
# these credentials should be rotated and moved out of source control.
class test(unittest.TestCase):
    def setUp(self):
        self.verificationErrors = []
        # Earlier attempts with other browser launchers, kept for reference:
        # self.selenium = selenium("localhost", 4444, "*chrome", "http://portal/")
        # self.selenium = selenium("localhost", 4444, r"*googlechrome", "http://portal/")
        # self.selenium = selenium("localhost", 4444, r"*googlechromeC:\Program Files\Google\Chrome\Application\chrome.exe", "http://portal/")
        self.selenium = selenium("localhost", 4444, "*iexplore", "http://portal/")
        self.selenium.start()

    def test_test(self):
        sel = self.selenium
        # Log in on the portal login page.
        sel.open("/General/Login.aspx")
        sel.type("txtUserID", "004552")
        sel.type("txtPass", "N122119493")
        sel.click("rdblQuickMenu_0")
        sel.click("imgBtnSubmitNew")
        sel.wait_for_page_to_load("30000")
        # Navigate to the patient-query screen and look up a chart number.
        sel.click("link2")
        sel.wait_for_pop_up("_self", "30000")
        sel.click("NTUHWeb1_ChartInputTextBox")
        sel.type("NTUHWeb1_ChartInputTextBox", "5527107")
        sel.click("NTUHWeb1_ButtonQuery")
        sel.wait_for_page_to_load("30000")
        sel.click("NTUHWeb1_PatAccountListRecord1_ShowOperationList")
        sel.wait_for_page_to_load("30000")

    def tearDown(self):
        self.selenium.stop()
        self.assertEqual([], self.verificationErrors)

if __name__ == "__main__":
    unittest.main()

View file

View file

@ -0,0 +1,12 @@
from django import template
import datetime
register = template.Library()
def age(bday, d=None):
    """Return whole years elapsed from *bday* to *d* (default: today)."""
    ref = datetime.date.today() if d is None else d
    years = ref.year - bday.year
    # Not yet reached the birthday in the reference year: one year less.
    if (ref.month, ref.day) < (bday.month, bday.day):
        years -= 1
    return years
register.filter('age', age)

109
ntuh/ck/templatetags/stack.py Executable file
View file

@ -0,0 +1,109 @@
from django import template
import math
register = template.Library()
class stack:
    """Minimal LIFO stack backing the template-tag calculator filters."""

    def __init__(self):
        self.stack = []

    def push(self, o):
        """Append *o* to the top of the stack."""
        self.stack.append(o)

    def pop(self):
        """Remove and return the top element.

        Raises KeyError (not IndexError) when empty — callers depend on
        the original exception type, so it is preserved.
        """
        if not self.stack:
            raise KeyError("Stack is empty")
        return self.stack.pop()

    def is_empty(self):
        return not self.stack

    def __len__(self):
        return len(self.stack)
# truncate a floating point number only if it has no decimal part (convert from string if necessary)
# truncate a floating point number only if it has no decimal part (convert
# from string if necessary)
def number(num):
    """Coerce *num* (string or number) to float, collapsing whole values to int."""
    as_float = float(num)
    as_int = int(as_float)
    # Exact float equality is intentional here (matches the original FIXME).
    return as_int if as_int == as_float else as_float
# Per-key stack instances shared by the filters below.
# NOTE(review): module-level mutable state — shared across all template
# renders and never cleaned up; entries accumulate for every key passed
# to stnew.  Confirm this is acceptable for the deployment model.
stacks = {}

@register.filter
def stnew(value):
    # Create (or reset) the stack registered under *value*; returns *value*
    # so filters can be chained in templates.
    #print 'stnew'
    stacks[value] = stack()
    return value

@register.filter
def stpush(value, arg):
    # Push number(arg) onto *value*'s stack.
    # NOTE(review): raises KeyError if stnew was not applied to *value* first.
    #print 'stpush:',
    stacks[value].push(number(arg))
    return value

@register.filter
def stpop(value):
    # Discard the top element; silently a no-op for unknown keys.
    #print 'stpop:',
    if value in stacks:
        stacks[value].pop()
    return value

@register.filter
def stget(value):
    # Pop and return the top element (implicitly None for unknown keys).
    #print 'stget:',
    if value in stacks:
        return stacks[value].pop()

@register.filter
def stadd(value):
    # Replace the top two elements with their sum.
    #print 'stadd:',
    two = stacks[value].pop()
    one = stacks[value].pop()
    stacks[value].push(one + two)
    return value

@register.filter
def stsub(value):
    # Replace the top two elements with (second-from-top - top).
    #print 'stsub:',
    two = stacks[value].pop()
    one = stacks[value].pop()
    stacks[value].push(one - two)
    return value

@register.filter
def stmult(value):
    # Replace the top two elements with their product.
    #print 'stmult:',
    two = stacks[value].pop()
    one = stacks[value].pop()
    stacks[value].push(one * two)
    return value

@register.filter
def stdiv(value):
    # Replace the top two elements with their true quotient, collapsed to
    # int when whole (via number()).
    #print 'stdiv:',
    two = stacks[value].pop()
    one = stacks[value].pop()
    stacks[value].push(number(float(one) / float(two)))
    return value

@register.filter
def stmod(value):
    # Replace the top two elements with (second-from-top % top).
    two = stacks[value].pop()
    one = stacks[value].pop()
    stacks[value].push(one % two)
    return value

@register.filter
def stsqrt(value):
    # Replace the top element with its square root.
    one = stacks[value].pop()
    stacks[value].push(math.sqrt(one))
    return value

23
ntuh/ck/tests.py Executable file
View file

@ -0,0 +1,23 @@
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Sanity-check test case kept from the Django app template."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # FIX: failUnlessEqual was deprecated in Python 2.7/3.2 and removed
        # in Python 3.12; assertEqual is the supported spelling.
        self.assertEqual(1 + 1, 2)
# Doctest payload collected by the (legacy) Django test runner's doctest
# discovery when running "manage.py test".
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}

269
ntuh/ck/unf.py Executable file
View file

@ -0,0 +1,269 @@
#!/usr/bin/python
# coding=utf-8
from intra import *
import datetime
import time
def unf():
    """Build the 'unfinished medical records' HTML report (Python 2 only).

    Pulls two overdue buckets from unf_sort() (imported from intra):
    7-30 days and 31-365 days.  For each bucket it tallies counts per
    attending (dr), per resident, and per division, then writes a static
    HTML page with side-by-side tables and per-division totals.
    Uses Python 2 syntax (print >> file, dict.has_key) throughout.
    """
    # Bucket 1: records 7-30 days overdue.
    unf1 = unf_sort(-30,-7)
    dr1 = unf1['dr']
    resident1 = unf1['resident']
    division1 = unf1['division']
    # Bucket 2: records 31-365 days overdue.
    unf2 = unf_sort(-365,-31)
    dr2 = unf2['dr']
    resident2 = unf2['resident']
    division2 = unf2['division']
    # Longest of the four ranked lists decides the table row count.
    length = max(len(dr1),
                 len(resident1),
                 len(dr2),
                 len(resident2))
    result=[]
    # Header row: attending / division / count, resident / count — twice
    # (once per bucket).
    result.append(['主治醫師','科別','份數','住院醫師','份數','主治醫師','科別','份數','住院醫師','份數'])
    # Running totals: d* = attending counts, r* = resident counts.
    d1 = 0
    r1 = 0
    d2 = 0
    r2 = 0
    for i in range(length):
        r = []
        # Bucket-1 attending columns (name, division via the vs lookup, count);
        # pad with blanks once this ranked list is exhausted.
        if i < len(dr1):
            r.append(dr1[i][0])
            if vs.has_key(dr1[i][0]):
                r.append(vs[dr1[i][0]])
            else:
                r.append('')
            r.append(dr1[i][1])
            d1 += dr1[i][1]
        else:
            r.append('')
            r.append('')
            r.append('')
        # Bucket-1 resident columns.
        if i < len(resident1):
            r.append(resident1[i][0])
            r.append(resident1[i][1])
            r1 += resident1[i][1]
        else:
            r.append('')
            r.append('')
        # Bucket-2 attending columns.
        if i < len(dr2):
            r.append(dr2[i][0])
            if vs.has_key(dr2[i][0]):
                r.append(vs[dr2[i][0]])
            else:
                r.append('')
            r.append(dr2[i][1])
            d2 += dr2[i][1]
        else:
            r.append('')
            r.append('')
            r.append('')
        # Bucket-2 resident columns.
        if i < len(resident2):
            r.append(resident2[i][0])
            r.append(resident2[i][1])
            r2 += resident2[i][1]
        else:
            r.append('')
            r.append('')
        result.append(r)
    # Write the static report page served by the media host.
    # output = open('/home/xfr/mysite/site_media/unf.html','w')
    output = open('/SharedDocs/html/media.ntuh.net/unf.html','w')
    print >> output, """
<html>
<head>
<meta http-equiv=Content-Type content="text/html; charset=utf-8">
<title>外科部病歷未完成</title>
</head>
<body>
"""
    print >> output, "<table><tbody align='center'>"
    print >> output, "<tr><td colspan='10'>%s</td></tr>" % time.asctime()
    print >> output, "<tr><td colspan='5'>%s</td><td colspan='5'>%s</td></tr>" % ('7至30日','超過30日')
    for r in result:
        print >> output ,"<tr>"
        for c in r:
            print >> output, "<td>%s</td>" % c
        print >> output ,"</tr>"
    print >> output, "<tr><td>---</td></tr>"
    # Totals row for both buckets.
    print >> output, "<tr>"
    print >> output, "<td>總計</td><td></td><td>%i</td>" % d1
    print >> output, "<td>總計</td><td>%i</td>" % r1
    print >> output, "<td>總計</td><td></td><td>%i</td>" % d2
    print >> output, "<td>總計</td><td>%i</td>" % r2
    print >> output, "</tr>"
    print >> output, "</tbody></table>"
    print >> output, "<hr/>"
    # Second table: per-division counts.  Fill missing divisions with 0 so
    # both buckets cover the same key set, then add a combined total (div3).
    print >> output, "<table><tbody align='center'>"
    div1 = dict(division1)
    div2 = dict(division2)
    for div in div1.keys():
        if not div2.has_key(div):
            div2[div] = 0
    for div in div2.keys():
        if not div1.has_key(div):
            div1[div] = 0
    div3 = {}
    for div in div1.keys():
        div3[div] = div1[div] + div2[div]
    # print div1
    # print div2
    # print div3
    # Divisions ordered by descending combined count.
    division_sort = sorted(list(div3), key=lambda x: -div3[x])
    # print division_sort
    print >> output, "<tr><td></td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div
    print >> output, "</tr>"
    print >> output, "<tr><td>7至30日</td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div1[div]
    print >> output, "</tr>"
    print >> output, "<tr><td>超過30日</td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div2[div]
    print >> output, "</tr>"
    print >> output, "<tr><td>合計</td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div3[div]
    print >> output, "</tr>"
    print >> output, "</tbody></table>"
    print >> output, "</body></html>"
    output.close()
    return result
def unf_month():
    """Build last month's 'unfinished medical records' report (Python 2 only).

    Same structure as unf(), but with a single bucket: everything overdue
    as of the end of the previous month (up to 365 days back).
    """
    # End of the previous month: today minus today's day-of-month.
    day = datetime.date.today().day
    EndDate = datetime.date.today() + datetime.timedelta(days=-day)
    unf1 = unf_sort(-365,-day)
    dr1 = unf1['dr']
    resident1 = unf1['resident']
    division1 = unf1['division']
    length = max(len(dr1),
                 len(resident1))
    result=[]
    # Header: attending / division / count, resident / count.
    result.append(['主治醫師','科別','份數','住院醫師','份數'])
    # Running totals: attendings (d1) and residents (r1).
    d1 = 0
    r1 = 0
    for i in range(length):
        r = []
        # Attending columns; division looked up in vs, blank padding once
        # this ranked list is exhausted.
        if i < len(dr1):
            r.append(dr1[i][0])
            if vs.has_key(dr1[i][0]):
                r.append(vs[dr1[i][0]])
            else:
                r.append('')
            r.append(dr1[i][1])
            d1 += dr1[i][1]
        else:
            r.append('')
            r.append('')
            r.append('')
        # Resident columns.
        if i < len(resident1):
            r.append(resident1[i][0])
            r.append(resident1[i][1])
            r1 += resident1[i][1]
        else:
            r.append('')
            r.append('')
        result.append(r)
    # Write the static report page served by the media host.
    # output = open('/home/xfr/mysite/site_media/unf.html','w')
    output = open('/SharedDocs/html/media.ntuh.net/unf_month.html','w')
    print >> output, """
<html>
<head>
<meta http-equiv=Content-Type content="text/html; charset=utf-8">
<title>上月外科部病歷未完成</title>
</head>
<body>
"""
    print >> output, "<table><tbody align='center'>"
    print >> output, "<tr><td colspan='5'>%s</td></tr>" % time.asctime()
    print >> output, "<tr><td colspan='5'>%s前</td></tr>" % EndDate
    for r in result:
        print >> output ,"<tr>"
        for c in r:
            print >> output, "<td>%s</td>" % c
        print >> output ,"</tr>"
    print >> output, "<tr><td>---</td></tr>"
    # Totals row.
    print >> output, "<tr>"
    print >> output, "<td>總計</td><td></td><td>%i</td>" % d1
    print >> output, "<td>總計</td><td>%i</td>" % r1
    print >> output, "</tr>"
    print >> output, "</tbody></table>"
    print >> output, "<hr/>"
    # Second table: per-division counts, descending.
    print >> output, "<table><tbody align='center'>"
    div1 = dict(division1)
    # print div1
    # print div2
    # print div3
    division_sort = sorted(list(div1), key=lambda x: -div1[x])
    # print division_sort
    print >> output, "<tr><td></td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div
    print >> output, "</tr>"
    print >> output, "<tr><td>合計</td>"
    for div in division_sort:
        print >> output, "<td>%s</td>" % div1[div]
    print >> output, "</tr>"
    print >> output, "</tbody></table>"
    print >> output, "</body></html>"
    output.close()
    return result
# The module doubles as a cron script: generate both reports when run.
unf()
unf_month()

1380
ntuh/ck/views.py Executable file

File diff suppressed because it is too large Load diff

62
ntuh/ck/vs.csv Executable file
View file

@ -0,0 +1,62 @@
CS,徐紹勛
CS,李元麒
CS,李章銘
CS,陳晉興
CS,黃培銘
CVS,吳毅暉
CVS,周迺寬
CVS,張重義
CVS,王植賢
CVS,王水深
CVS,紀乃新
CVS,虞希禹
CVS,許榮彬
CVS,邱英世
CVS,陳益祥
CVS,黃書健
GS,何承懋
GS,何明志
GS,吳耀銘
GS,張金堅
GS,李伯皇
GS,林明燦
GS,林本仁
GS,梁金銅
GS,楊卿堯
GS,王明暘
GS,田郁文
GS,胡瑞恆
GS,蔡孟昆
GS,袁瑞晃
GS,賴逸儒
GS,郭文宏
GS,陳坤源
GS,陳炯年
GS,黃俊升
GS,黃凱文
GS,黃約翰
ICU,柯文哲
NS,曾勝弘
NS,曾漢民
NS,杜永光
NS,楊士弘
NS,王國川
NS,蔡瑞章
NS,蕭輔仁
NS,賴達明
NS,郭夢菲
NS,陳敞牧
NS,黃勝堅
PED,林文熙
PED,蔡明憲
PED,許文明
PED,賴鴻緒
PS,戴浩志
PS,楊永健
PS,洪學義
PS,湯月碧
PS,簡雄飛
PS,謝孟祥
PS,謝榮賢
PS,郭源松
PS,鄭乃禎
1 CS 徐紹勛
2 CS 李元麒
3 CS 李章銘
4 CS 陳晉興
5 CS 黃培銘
6 CVS 吳毅暉
7 CVS 周迺寬
8 CVS 張重義
9 CVS 王植賢
10 CVS 王水深
11 CVS 紀乃新
12 CVS 虞希禹
13 CVS 許榮彬
14 CVS 邱英世
15 CVS 陳益祥
16 CVS 黃書健
17 GS 何承懋
18 GS 何明志
19 GS 吳耀銘
20 GS 張金堅
21 GS 李伯皇
22 GS 林明燦
23 GS 林本仁
24 GS 梁金銅
25 GS 楊卿堯
26 GS 王明暘
27 GS 田郁文
28 GS 胡瑞恆
29 GS 蔡孟昆
30 GS 袁瑞晃
31 GS 賴逸儒
32 GS 郭文宏
33 GS 陳坤源
34 GS 陳炯年
35 GS 黃俊升
36 GS 黃凱文
37 GS 黃約翰
38 ICU 柯文哲
39 NS 曾勝弘
40 NS 曾漢民
41 NS 杜永光
42 NS 楊士弘
43 NS 王國川
44 NS 蔡瑞章
45 NS 蕭輔仁
46 NS 賴達明
47 NS 郭夢菲
48 NS 陳敞牧
49 NS 黃勝堅
50 PED 林文熙
51 PED 蔡明憲
52 PED 許文明
53 PED 賴鴻緒
54 PS 戴浩志
55 PS 楊永健
56 PS 洪學義
57 PS 湯月碧
58 PS 簡雄飛
59 PS 謝孟祥
60 PS 謝榮賢
61 PS 郭源松
62 PS 鄭乃禎

62
ntuh/ck/vs0.csv Executable file
View file

@ -0,0 +1,62 @@
GS,<2C><><EFBFBD>
PED,苦翬狐
GS,癒风<E79992>
GS,独玊ど
GS,狶<>
PS,傣る貉
ICU,琠ゅ<E790A0>
CS,<2C>じ腝
GS,璊风<E7928A>
NS,<2C><EFBFBD>
PED,狶ゅ撼
CS,畗残吃
NS,朝疮<E69C9D>
PS,尝方猀
GS,朝<>
GS,<2C><>в
CVS,㏄癷糴
PS,虏动<E8998F>
GS,バ<>
NS,苦笷<E88BA6>
CVS,<2C><EFBFBD>
GS,辩<>
CVS,朝痲不
PS,瑇厩竡
GS,朝<><E69C9D>
PS,拦疎в
GS,尝ゅЩ
CVS,砛篴眑
CS,<2C>彻皇
PED,讲<>
NS,<2C><EFBFBD>
PS,綠<>
NS,独秤绊
CS,朝<>
GS,<2C>模皇
PS,法ッ胺
NS,纯簙チ
NS,纯秤グ
CVS,<2C><>
CVS,眎<>
PS,谅﹕不
GS,苦秇晶
GS,讲﹕<E8AEB2>
CVS,阜<><E9989C>
PED,砛ゅ<E7A09B>
CS,独蚌皇
CVS,独<>
GS,法<>
NS,讲风彻
NS,尝冠滇
GS,狶セく
NS,法<>
PS,谅篴藉
NS,拷徊く
GS,<2C>┯婪
CVS,<2C>从藉
CVS,<2C>驾穟
GS,眎<>
GS,独<>
GS,独惩ゅ
GS,<2C><>
CVS,<2C><>
1 GS ���
2 PED 苦翬狐
3 GS 癒风�
4 GS 独玊ど
5 GS 狶�篱
6 PS 傣る貉
7 ICU 琠ゅ�
8 CS �じ腝
9 GS 璊风�
10 NS �ッ�
11 PED 狶ゅ撼
12 CS 畗残吃
13 NS 朝疮�
14 PS 尝方猀
15 GS 朝�方
16 GS ��в
17 CVS ㏄癷糴
18 PS 虏动�
19 GS バ�ゅ
20 NS 苦笷�
21 CVS �璣�
22 GS 辩�簧
23 CVS 朝痲不
24 PS 瑇厩竡
25 GS 朝��
26 PS 拦疎в
27 GS 尝ゅЩ
28 CVS 砛篴眑
29 CS �彻皇
30 PED 讲�舅
31 NS �瓣�
32 PS 綠�赫
33 NS 独秤绊
34 CS 朝�砍
35 GS �模皇
36 PS 法ッ胺
37 NS 纯簙チ
38 NS 纯秤グ
39 CVS ��瞏
40 CVS 眎�竡
41 PS 谅﹕不
42 GS 苦秇晶
43 GS 讲﹕�
44 CVS 阜��
45 PED 砛ゅ�
46 CS 独蚌皇
47 CVS 独�胺
48 GS 法�丑
49 NS 讲风彻
50 NS 尝冠滇
51 GS 狶セく
52 NS 法�グ
53 PS 谅篴藉
54 NS 拷徊く
55 GS �┯婪
56 CVS �从藉
57 CVS �驾穟
58 GS 眎�绊
59 GS 独�揩
60 GS 独惩ゅ
61 GS ��穤
62 CVS ��穝

14
ntuh/context_processors.py Executable file
View file

@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
from ntuh import settings
import os
ABSPATH = os.path.abspath(os.path.dirname(__file__))
def context_processors(request):
return {
'open_icon_library': 'http://www.ntuh.net/open_icon_library-full/',
'specialty': settings.SPECIALTY,
}

16
ntuh/cron.sh Executable file
View file

@ -0,0 +1,16 @@
#!/bin/bash
# Cron entry point: refresh operation and pathology data via the fetch
# scripts in this directory.
PATH=$PATH:/usr/local/bin
# Earlier environment setups, kept for reference:
#. /home/ntuh/d111/bin/activate
#conda init bash
# Activate the conda environment that has Django 2 installed.
. /opt/conda/etc/profile.d/conda.sh
conda activate django2
# Print the locale so cron-environment issues show up in the job log.
locale
# Run from the script's own directory so relative paths resolve.
cd $(dirname $0)
python getop.py
python getpatho.py
#python get_inpatient.py

8
ntuh/cron_hourly.sh Executable file
View file

@ -0,0 +1,8 @@
#!/bin/bash
# Hourly cron entry point: refresh operation data only.
. /home/ntuh/d18env/bin/activate
# Print the locale so cron-environment issues show up in the job log.
locale
# Run from the script's own directory so relative paths resolve.
cd $(dirname $0)
python getop.py

15
ntuh/cron_xray.sh Executable file
View file

@ -0,0 +1,15 @@
#!/bin/bash
# Cron entry point: refresh CT/MR report data, then dump the xray-report
# table and ship it to the mirror host.
PATH=$PATH:/usr/local/bin
#. /home/ntuh/d111/bin/activate
#conda init bash
. /opt/conda/etc/profile.d/conda.sh
conda activate django2
locale
cd $(dirname $0)
python getctmr.py
# SECURITY(review): the MySQL password is passed on the command line
# (visible in `ps` output and shell history) and checked into source —
# prefer ~/.my.cnf or mysql_config_editor --login-path, and rotate it.
/usr/bin/mysqldump -pn122119493 adm15 registry_xraytextreport > registry_xraytextreport.sql
/usr/bin/rsync -Pz -e "ssh -p 28808" registry_xraytextreport.sql root@mdi.bar:

80
ntuh/doc/plastic_category.txt Executable file
View file

@ -0,0 +1,80 @@
手術
A. Congenital Deformity
1.Cleft lip and palate
2.Ear reconstruction
3.Hand
4.Foot
5.Craniofacial surgery
6.Others
B. Traumatic
1.Soft tissue repair
2.Facial fracture repair
Nasal bone fracture
Mandible fracture
Zygomatic fracture
Maxillary fracture
Orbital blow out fracture
Panfacial fracture
C. Hand surgery
1.Tendon
2.Nerve
3.Vessel repair
4.Fracture reduction
5.Soft tissue repair
6.Replantation
7.Others
D. Aesthetic
1.Face lift
2.Blepharoplasty
3.Rhinoplasty
4.Facial contouring and genioplasty
5.Fat grafting
6.Body contouring and liposuction
7.Breast
Augmentation
Reduction
Gynecomastia
Nipple-areolar complex
8.Scar revision
9.Osmidrosis
10.Others
E. Microvascular and reconstructive Surgery
1.Free flap transfer
Breast reconstruction
Head and neck
Functional muscle transfer
Extremity
2.Replantation of digit
3.Replantation of extremity
4.Others (flap revision /commissuroplasty)
5.Breast Implant
F. Benign and malignant tumors
1.Skin and Soft tissue tumor, benign
Hemangioma or vascular malformation
Neurofibroma
Others
2.Skin, malignancy
Basal cell carcinoma
Squamous cell carcinoma
Malignant melanoma
Others
3.Soft tissue, malignancy
4.Salivary Gland Tumor
5.Others
G. Miscellaneous
1.Debridement
2.STSG
3.FTSG
4.Local flap
5.Pedicle flap / division
6.Varicose vein
7.Nerve graft
8.Fat graft
9.Allograft
10.Amputation
11.Others
H. Burn (acute)
1.Debridement
I. Burn (late effect)
1.Tissue expander
2.Others

16
ntuh/dojango/__init__.py Executable file
View file

@ -0,0 +1,16 @@
# (major, minor, micro, release status, status number)
VERSION = (0, 5, 2, 'final', 0)

def get_version():
    """Return the dojango version string derived from VERSION."""
    major, minor, micro, status, extra = VERSION
    version = '%s.%s' % (major, minor)
    if micro:
        version = '%s.%s' % (version, micro)
    if (status, extra) == ('alpha', 0):
        version = '%s pre-alpha' % version
    elif status != 'final':
        version = '%s %s %s' % (version, status, extra)
    return version

20
ntuh/dojango/appengine/README Executable file
View file

@ -0,0 +1,20 @@
This directory contains some helpers for running dojango on appengine.
memcache_zipserve.py:
Part of http://code.google.com/p/google-app-engine-samples/:
Using zipserve to serve the media-files. After the first use they'll be
cached in memcache. Modified to support last-modified-headers (so we have
a real CDN!)
dojo_serve.py:
Helper for serving the whole dojo release folder, that holds the dojo
modules as zipfiles.
It can be used within app.yaml (AppEngine configuration file) like this:
- url: /dojango/media/release/.*
script: dojango/appengine/dojo_serve.py
Afterwards all zip-files within /dojango/media/release/DOJANGO_DOJO_VERSION/
will be served and cached.

View file

View file

@ -0,0 +1,51 @@
import os
import wsgiref.handlers

from dojango.appengine import memcache_zipserve
from google.appengine.ext import webapp

# setup the environment
from common.appenginepatch.aecmd import setup_env
setup_env(manage_py_env=True)
from dojango.conf import settings

# creating a handler structure for the zip-files within the release folder:
# each .zip in the dojo release directory becomes one single-element handler
# entry for memcache_zipserve.
release_dir = '%s/release/%s' % (settings.BASE_MEDIA_ROOT, settings.DOJO_VERSION)
handlers = []
for zip_file in os.listdir(release_dir):
    if zip_file.endswith(".zip"):
        module = os.path.splitext(zip_file)[0]
        handler = [os.path.join(release_dir, zip_file)]
        handlers.append(handler)

class FlushCache(webapp.RequestHandler):
    """
    Handler for flushing the whole memcache instance.
    """
    # Requires a logged-in user; admin status is re-checked inside get().
    from google.appengine.ext.webapp.util import login_required
    @login_required
    def get(self):
        from google.appengine.api import memcache
        from google.appengine.api import users
        if users.is_current_user_admin():
            # Snapshot stats before flushing so they can be reported.
            stats = memcache.get_stats()
            memcache.flush_all()
            self.response.out.write("Memcache successfully flushed!<br/>")
            if stats:
                self.response.out.write("<p>Memcache stats:</p><p>")
                for key in stats.keys():
                    self.response.out.write("%s: %s<br/>" % (key, stats[key]))
                self.response.out.write("</p>")

def main():
    # Route dojo media requests to the zip-serving handler (cached for one
    # year client-side) and expose the manual cache-flush endpoint.
    application = webapp.WSGIApplication([
        ('%s/%s/(.*)' % (settings.BUILD_MEDIA_URL, settings.DOJO_VERSION),
            memcache_zipserve.create_handler(handlers, max_age=31536000)
        ),
        ('%s/_flushcache[/]{0,1}' % settings.BUILD_MEDIA_URL, FlushCache)
    ], debug=False)
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()

View file

@ -0,0 +1,435 @@
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
NOTE: THIS FILE WAS MODIFIED TO SUPPORT CLIENT CACHING
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'j.c@google.com (Justin Mattson)'
import email.Utils
import datetime
import logging
import mimetypes
import os
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from django.utils.hashcompat import md5_constructor
def create_handler(zip_files, max_age=None, public=None, client_caching=None):
    """Factory method to create a MemcachedZipHandler instance.

    Args:
      zip_files: A list of file names, or a list of lists of file name, first
        member of file mappings. See MemcachedZipHandler documentation for
        more information about using the list of lists format
      max_age: The maximum client-side cache lifetime
      public: Whether this should be declared public in the client-side cache
      client_caching: Whether Last-Modified/ETag headers should be emitted

    Returns:
      A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
      Engine

    Raises:
      ValueError: if the zip_files argument is not a list
    """
    # verify argument integrity. If the argument is passed in list format,
    # convert it to list of lists format
    if zip_files and type(zip_files).__name__ == 'list':
        num_items = len(zip_files)
        while num_items > 0:
            if type(zip_files[num_items - 1]).__name__ != 'list':
                zip_files[num_items - 1] = [zip_files[num_items-1]]
            num_items -= 1
    else:
        raise ValueError('File name arguments must be a list')

    class HandlerWrapper(MemcachedZipHandler):
        """Simple wrapper for an instance of MemcachedZipHandler.

        I'm still not sure why this is needed
        """
        def get(self, name):
            # Push the factory arguments onto the handler instance before
            # delegating to the real implementation.
            self.zipfilenames = zip_files
            if max_age is not None:
                self.MAX_AGE = max_age
            if public is not None:
                self.PUBLIC = public
            if client_caching is not None:
                self.CLIENT_CACHING = client_caching
            self.TrueGet(name)

    return HandlerWrapper
class CacheFile(object):
    # Plain attribute bag: .file (contents), .lastmod (datetime) and
    # .etag (str) are set ad hoc by MemcachedZipHandler.GetFromStore.
    pass
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
current_last_modified = None # where we save the current last modified datetime
current_etag = None # the current ETag of a file served
CLIENT_CACHING = True # is client caching enabled? (sending Last-Modified and ETag within response!)
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = "cache://" # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = "noncache://" # memcache key prefix for non-existant URL
  def TrueGet(self, name):
    """The top-level entry point to serving requests.

    Called 'True' get because it does the work when called from the wrapper
    class' get method.  Lookup order: memcache, negative cache, zip archives;
    misses are stored back into the appropriate cache.  Honors If-None-Match
    and If-Modified-Since with 304 responses.  Python 2 only (has_key).

    Args:
      name: URL requested

    Returns:
      None
    """
    name = self.PreprocessUrl(name)
    # see if we have the page in the memcache
    resp_data = self.GetFromCache(name)
    if resp_data is None:
      logging.info('Cache miss for %s', name)
      # -1 marks "not in the negative cache" — see GetFromNegativeCache.
      resp_data = self.GetFromNegativeCache(name)
      if resp_data is None or resp_data == -1:
        resp_data = self.GetFromStore(name)
        # IF we have the file, put it in the memcache
        # ELSE put it in the negative cache
        if resp_data is not None:
          self.StoreOrUpdateInCache(name, resp_data)
        else:
          logging.info('Adding %s to negative cache, serving 404', name)
          self.StoreInNegativeCache(name)
          self.Write404Error()
          return
      else:
        # Known-missing URL (negative-cache hit): short-circuit to 404.
        self.Write404Error()
        return
    content_type, encoding = mimetypes.guess_type(name)
    if content_type:
      self.response.headers['Content-Type'] = content_type
    self.current_last_modified = resp_data.lastmod
    self.current_etag = resp_data.etag
    self.SetCachingHeaders()
    # if the received ETag matches
    if resp_data.etag == self.request.headers.get('If-None-Match'):
      self.error(304)
      return
    # if-modified-since was passed by the browser
    if self.request.headers.has_key('If-Modified-Since'):
      dt = self.request.headers.get('If-Modified-Since').split(';')[0]
      modsince = datetime.datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
      if modsince >= self.current_last_modified:
        # The file is older than the cached copy (or exactly the same)
        self.error(304)
        return
    self.response.out.write(resp_data.file)
def PreprocessUrl(self, name):
"""Any preprocessing work on the URL when it comes it.
Put any work related to interpretting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
The processed URL
"""
if name[len(name) - 1:] == '/':
return "%s%s" % (name, 'index.html')
else:
return name
  def GetFromStore(self, file_path):
    """Retrieve file from zip files.

    Get the file from the source, it must not have been in the memcache. If
    possible, we'll use the zip file index to quickly locate where the file
    should be found. (See MapToFileArchive documentation for assumptions about
    file ordering.) If we don't have an index or don't find the file where the
    index says we should, look through all the zip files to find it.

    Args:
      file_path: the file that we're looking for

    Returns:
      A CacheFile (.file/.lastmod/.etag) for the requested file, or None
    """
    resp_data = None
    file_itr = iter(self.zipfilenames)
    # check the index, if we have one, to see what archive the file is in
    archive_name = self.MapFileToArchive(file_path)
    if not archive_name:
      archive_name = file_itr.next()[0]
    # Walk the remaining archives until the file is found or we run out.
    while resp_data is None and archive_name:
      zip_archive = self.LoadZipFile(archive_name)
      if zip_archive:
        # we expect some lookups will fail, and that's okay, 404s will deal
        # with that
        try:
          resp_data = CacheFile()
          info = os.stat(archive_name)
          # Last-modified taken from the zip entry, not the archive file.
          #lastmod = datetime.datetime.fromtimestamp(info[8])
          lastmod = datetime.datetime(*zip_archive.getinfo(file_path).date_time)
          resp_data.file = zip_archive.read(file_path)
          resp_data.lastmod = lastmod
          resp_data.etag = '"%s"' % md5_constructor(resp_data.file).hexdigest()
        except (KeyError, RuntimeError), err:
          # no op
          x = False
          resp_data = None
        if resp_data is not None:
          logging.info('%s read from %s', file_path, archive_name)
      # Advance to the next archive; False terminates the loop.
      try:
        archive_name = file_itr.next()[0]
      except (StopIteration), err:
        archive_name = False
    return resp_data
def LoadZipFile(self, zipfilename):
"""Convenience method to load zip file.
Just a convenience method to load the zip file from the data store. This is
useful if we ever want to change data stores and also as a means of
dependency injection for testing. This method will look at our file cache
first, and then load and cache the file if there's a cache miss
Args:
zipfilename: the name of the zip file to load
Returns:
The zip file requested, or None if there is an I/O error
"""
zip_archive = None
zip_archive = self.zipfile_cache.get(zipfilename)
if zip_archive is None:
try:
zip_archive = zipfile.ZipFile(zipfilename)
self.zipfile_cache[zipfilename] = zip_archive
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
err))
return zip_archive
def MapFileToArchive(self, file_path):
"""Given a file name, determine what archive it should be in.
This method makes two critical assumptions.
(1) The zip files passed as an argument to the handler, if concatenated
in that same order, would result in a total ordering
of all the files. See (2) for ordering type.
(2) Upper case letters before lower case letters. The traversal of a
directory tree is depth first. A parent directory's files are added
before the files of any child directories
Args:
file_path: the file to be mapped to an archive
Returns:
The name of the archive where we expect the file to be
"""
num_archives = len(self.zipfilenames)
while num_archives > 0:
target = self.zipfilenames[num_archives - 1]
if len(target) > 1:
if self.CompareFilenames(target[1], file_path) >= 0:
return target[0]
num_archives -= 1
return None
def CompareFilenames(self, file1, file2):
"""Determines whether file1 is lexigraphically 'before' file2.
WARNING: This method assumes that paths are output in a depth-first,
with parent directories' files stored before childs'
We say that file1 is lexigraphically before file2 if the last non-matching
path segment of file1 is alphabetically before file2.
Args:
file1: the first file path
file2: the second file path
Returns:
A positive number if file1 is before file2
A negative number if file2 is before file1
0 if filenames are the same
"""
f1_segments = file1.split('/')
f2_segments = file2.split('/')
segment_ptr = 0
while (segment_ptr < len(f1_segments) and
segment_ptr < len(f2_segments) and
f1_segments[segment_ptr] == f2_segments[segment_ptr]):
segment_ptr += 1
if len(f1_segments) == len(f2_segments):
# we fell off the end, the paths much be the same
if segment_ptr == len(f1_segments):
return 0
# we didn't fall of the end, compare the segments where they differ
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
# the number of segments differs, we either mismatched comparing
# directories, or comparing a file to a directory
else:
# IF we were looking at the last segment of one of the paths,
# the one with fewer segments is first because files come before
# directories
# ELSE we just need to compare directory names
if (segment_ptr + 1 == len(f1_segments) or
segment_ptr + 1 == len(f2_segments)):
return len(f2_segments) - len(f1_segments)
else:
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
def SetCachingHeaders(self):
"""Set caching headers for the request."""
max_age = self.MAX_AGE
self.response.headers['Expires'] = email.Utils.formatdate(
time.time() + max_age, usegmt=True)
cache_control = []
if self.PUBLIC:
cache_control.append('public')
cache_control.append('max-age=%d' % max_age)
self.response.headers['Cache-Control'] = ', '.join(cache_control)
# adding caching headers for the client
if self.CLIENT_CACHING:
if self.current_last_modified:
self.response.headers['Last-Modified'] = self.current_last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT")
if self.current_etag:
self.response.headers['ETag'] = self.current_etag
def GetFromCache(self, filename):
"""Get file from memcache, if available.
Args:
filename: The URL of the file to return
Returns:
The content of the file
"""
return memcache.get("%s%s" % (self.CACHE_PREFIX, filename))
def StoreOrUpdateInCache(self, filename, data):
"""Store data in the cache.
Store a piece of data in the memcache. Memcache has a maximum item size of
1*10^6 bytes. If the data is too large, fail, but log the failure. Future
work will consider compressing the data before storing or chunking it
Args:
filename: the name of the file to store
data: the data of the file
Returns:
None
"""
try:
if not memcache.add("%s%s" % (self.CACHE_PREFIX, filename), data):
memcache.replace("%s%s" % (self.CACHE_PREFIX, filename), data)
except (ValueError), err:
logging.warning("Data size too large to cache\n%s" % err)
  def Write404Error(self):
    """Output a simple 404 response."""
    self.error(404)
    self.response.out.write('Error 404, file not found')
def StoreInNegativeCache(self, filename):
"""If a non-existant URL is accessed, cache this result as well.
Future work should consider setting a maximum negative cache size to
prevent it from from negatively impacting the real cache.
Args:
filename: URL to add ot negative cache
Returns:
None
"""
memcache.add("%s%s" % (self.NEG_CACHE_PREFIX, filename), -1)
def GetFromNegativeCache(self, filename):
"""Retrieve from negative cache.
Args:
filename: URL to retreive
Returns:
The file contents if present in the negative cache.
"""
return memcache.get("%s%s" % (self.NEG_CACHE_PREFIX, filename))
def main():
  """Entry point: route all /<prefix>/<path> requests to the zip handler."""
  routes = [('/([^/]+)/(.*)', MemcachedZipHandler)]
  application = webapp.WSGIApplication(routes)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()

31
ntuh/dojango/bin/dojobuild.py Executable file
View file

@ -0,0 +1,31 @@
#!/usr/bin/env python
# This is the alternate dojo build command so it can be used
# with older versions of django (mainly because of AppEngine, it uses version 0.96)
import os
import sys
from optparse import OptionParser
def setup_environ():
    """Make the enclosing Django project importable and point
    DJANGO_SETTINGS_MODULE at its settings module.

    Returns:
        The absolute path of the detected project directory.
    """
    # we assume, that dojango is installed within your django's project dir
    project_directory = os.path.abspath(os.path.dirname(__file__)+'/../../')
    settings_filename = "settings.py"
    if not project_directory:
        # Defensive fallback; os.path.abspath normally never returns ''.
        project_directory = os.getcwd()
    project_name = os.path.basename(project_directory)
    settings_name = os.path.splitext(settings_filename)[0]
    # Both the project dir and its parent must be importable; the parent is
    # needed so "import <project_name>" below resolves.
    sys.path.append(project_directory)
    sys.path.append(os.path.abspath(project_directory + "/.."))
    # Imported for its side effect (validates the project package exists);
    # the non-empty fromlist makes __import__ return the named module itself.
    project_module = __import__(project_name, {}, {}, [''])
    # Drop the parent dir again, keeping only the project dir on sys.path.
    sys.path.pop()
    # Set DJANGO_SETTINGS_MODULE appropriately.
    os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
    return project_directory
# Must run before the dojango import below so the project paths and
# DJANGO_SETTINGS_MODULE are in place.
project_dir = setup_environ()
from dojango.management.commands.dojobuild import Command
if __name__ == "__main__":
    my_build = Command()
    # Reuse the management command's own option definitions for the parser.
    parser = OptionParser(option_list=my_build.option_list)
    options, args = parser.parse_args(sys.argv)
    # args[0] is the script name; forward the rest to the command.
    my_build.handle(*args[1:], **options.__dict__)

0
ntuh/dojango/conf/__init__.py Executable file
View file

97
ntuh/dojango/conf/settings.py Executable file
View file

@ -0,0 +1,97 @@
import os
from django.conf import settings
# All settings below read an optional DOJANGO_* override from the project's
# Django settings and fall back to a sensible default.
DEBUG = getattr(settings, "DEBUG", False)
DEFAULT_CHARSET = getattr(settings, 'DEFAULT_CHARSET', 'utf-8')
DOJO_VERSION = getattr(settings, "DOJANGO_DOJO_VERSION", "1.6.0")
DOJO_PROFILE = getattr(settings, "DOJANGO_DOJO_PROFILE", "google")
DOJO_MEDIA_URL = getattr(settings, "DOJANGO_DOJO_MEDIA_URL", 'dojo-media')
BASE_MEDIA_URL = getattr(settings, "DOJANGO_BASE_MEDIA_URL", '/dojango/%s' % DOJO_MEDIA_URL)
BUILD_MEDIA_URL = getattr(settings, "DOJANGO_BUILD_MEDIA_URL", '%s/release' % BASE_MEDIA_URL)
BASE_MEDIA_ROOT = getattr(settings, "DOJANGO_BASE_MEDIA_ROOT", os.path.abspath(os.path.dirname(__file__)+'/../dojo-media/'))
BASE_DOJO_ROOT = getattr(settings, "DOJANGO_BASE_DOJO_ROOT", BASE_MEDIA_ROOT + "/src")
# by default the dijit theme folder is used
DOJO_THEME_URL = getattr(settings, "DOJANGO_DOJO_THEME_URL", False)
DOJO_THEME = getattr(settings, "DOJANGO_DOJO_THEME", "claro")
DOJO_DEBUG = getattr(settings, "DOJANGO_DOJO_DEBUG", DEBUG) # using the default django DEBUG setting
DOJO_SECURE_JSON = getattr(settings, "DOJANGO_DOJO_SECURE_JSON", True) # if you are using dojo version < 1.2.0 you have to set it to False
CDN_USE_SSL = getattr(settings, "DOJANGO_CDN_USE_SSL", False) # is dojo served via https from google? doesn't work for aol!
# set the urls for actual possible paths for dojo
# one dojo profile must at least contain a path that defines the base url of a dojo installation
# the following settings can be set for each dojo profile:
# - base_url: where do the dojo files reside (without the version folder!)
# - use_xd: use the crossdomain-build? used to build the correct filename (e.g. dojo.xd.js)
# - versions: this list defines all possible versions that are available in the defined profile
# - uncompressed: use the uncompressed version of dojo (dojo.xd.js.uncompressed.js)
# - use_gfx: there is a special case, when using dojox.gfx from aol (see http://dev.aol.com/dojo)
# - is_local: marks a profile being local. this is needed when using the dojo module loader
# - is_local_build: profile being a locally built version
_aol_versions = ('0.9.0', '1.0.0', '1.0.2', '1.1.0', '1.1.1', '1.2.0', '1.2.3', '1.3', '1.3.0', '1.3.1', '1.3.2', '1.4', '1.4.0', '1.4.1', '1.4.3', '1.5', '1.5.0', '1.6', '1.6.0')
_aol_gfx_versions = ('0.9.0', '1.0.0', '1.0.2', '1.1.0', '1.1.1',)
_google_versions = ('1.1.1', '1.2', '1.2.0', '1.2.3', '1.3', '1.3.0', '1.3.1', '1.3.2', '1.4', '1.4.0', '1.4.1', '1.4.3', '1.5', '1.5.0', '1.6', '1.6.0')
DOJO_PROFILES = {
    'google': {'base_url':(CDN_USE_SSL and 'https' or 'http') + '://ajax.googleapis.com/ajax/libs/dojo', 'use_xd':True, 'versions':_google_versions}, # google just supports version >= 1.1.1
    'google_uncompressed': {'base_url':(CDN_USE_SSL and 'https' or 'http') + '://ajax.googleapis.com/ajax/libs/dojo', 'use_xd':True, 'uncompressed':True, 'versions':_google_versions},
    'aol': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'versions':_aol_versions},
    'aol_uncompressed': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'uncompressed':True, 'versions':_aol_versions},
    'aol_gfx': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'use_gfx':True, 'versions':_aol_gfx_versions},
    'aol_gfx-uncompressed': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'use_gfx':True, 'uncompressed':True, 'versions':_aol_gfx_versions},
    'local': {'base_url': '%(BASE_MEDIA_URL)s', 'is_local':True}, # we don't have a restriction on version names, name them as you like
    'local_release': {'base_url': '%(BUILD_MEDIA_URL)s', 'is_local':True, 'is_local_build':True}, # this will be available after the first dojo build!
    'local_release_uncompressed': {'base_url': '%(BUILD_MEDIA_URL)s', 'uncompressed':True, 'is_local':True, 'is_local_build':True} # same here
}
# we just want users to append/overwrite own profiles
DOJO_PROFILES.update(getattr(settings, "DOJANGO_DOJO_PROFILES", {}))
# =============================================================================================
# =================================== NEEDED FOR DOJO BUILD ===================================
# =============================================================================================
# general doc: http://dojotoolkit.org/book/dojo-book-0-9/part-4-meta-dojo/package-system-and-custom-builds
# see http://www.sitepen.com/blog/2008/04/02/dojo-mini-optimization-tricks-with-the-dojo-toolkit/ for details
DOJO_BUILD_VERSION = getattr(settings, "DOJANGO_DOJO_BUILD_VERSION", '1.6.0')
# this is the default build profile, that is used, when calling "./manage.py dojobuild"
# "./manage.py dojobuild dojango" would have the same effect
DOJO_BUILD_PROFILE = getattr(settings, "DOJANGO_DOJO_BUILD_PROFILE", "dojango")
# This dictionary defines your build profiles you can use within the custom command "./manage.py dojobuild"
# You can set your own build profile within the main settings.py of the project by defining a dictionary
# DOJANGO_DOJO_BUILD_PROFILES, that sets the following key/value pairs for each defined profile name:
# profile_file: which dojo profile file is used for the build (see dojango.profile.js how it has to look)
# options: these are the options that are passed to the build command (see the dojo doc for details)
# OPTIONAL SETTINGS (see DOJO_BUILD_PROFILES_DEFAULT):
# base_root: in which directory will the dojo version be built to?
# used_src_version: which version should be used for the dojo build (e.g. 1.1.1)
# build_version: what is the version name of the built release (e.g. dojango1.1.1) - this option can be overwritten by the commandline parameter --build_version=...
# minify_extreme_skip_files: a tuple of files/folders (each expressed as regular expression) that should be kept when doing a minify extreme (useful when you have several layers and don't want some files)
# this tuple will be appended to the default folders/files that are skipped: see SKIP_FILES in management/commands/dojobuild.py
DOJO_BUILD_PROFILES = {
    'dojango': {
        'options': 'profileFile="%(BASE_MEDIA_ROOT)s/dojango.profile.js" action=release optimize=shrinksafe.keepLines cssOptimize=comments.keepLines',
    },
    'dojango_optimized': {
        'options': 'profileFile="%(BASE_MEDIA_ROOT)s/dojango_optimized.profile.js" action=release optimize=shrinksafe.keepLines cssOptimize=comments.keepLines',
        'build_version': '%(DOJO_BUILD_VERSION)s-dojango-optimized-with-dojo',
    },
}
# these defaults are mixed into each DOJO_BUILD_PROFILES element
# but you can overwrite each attribute within your own build profile element
# e.g. DOJANGO_BUILD_PROFILES = {'used_src_version': '1.2.2', ....}
DOJO_BUILD_PROFILES_DEFAULT = getattr(settings, "DOJANGO_DOJO_BUILD_PROFILES_DEFAULT", {
    # build the release in the media directory of dojango
    # use a formatting string, so this can be set in the project's settings.py without getting the dojango settings
    'base_root': '%(BASE_MEDIA_ROOT)s/release',
    'used_src_version': '%(DOJO_BUILD_VERSION)s',
    'build_version': '%(DOJO_BUILD_VERSION)s-dojango-with-dojo',
})
# TODO: we should also enable the already pre-delivered dojo default profiles
# you can add/overwrite your own build profiles
DOJO_BUILD_PROFILES.update(getattr(settings, "DOJANGO_DOJO_BUILD_PROFILES", {}))
DOJO_BUILD_JAVA_EXEC = getattr(settings, 'DOJANGO_DOJO_BUILD_JAVA_EXEC', 'java')
# a version string that must have the following form: '1.0.0', '1.2.1', ....
# this setting is used within the dojobuild, because the build process changed since version 1.2.0
DOJO_BUILD_USED_VERSION = getattr(settings, 'DOJANGO_DOJO_BUILD_USED_VERSION', DOJO_BUILD_VERSION)

View file

@ -0,0 +1,25 @@
from dojango.util.config import Config
def config(request):
    '''Make several dojango constants available in the template, like:

    {{ DOJANGO.DOJO_BASE_URL }}, {{ DOJANGO.DOJO_URL }}, ...

    You can also use the templatetag 'set_dojango_context' in your templates.
    Just set the following at the top of your template to set these context
    constants:

    If you want to use the default DOJANGO_DOJO_VERSION/DOJANGO_DOJO_PROFILE:

    {% load dojango_base %}
    {% set_dojango_context %}

    Using a different profile set the following:

    {% load dojango_base %}
    {% set_dojango_context "google" "1.1.1" %}
    '''
    # Local renamed (was 'config') so it no longer shadows this function's
    # own name; the dead pre-initialization of the dict is removed.
    dojango_config = Config()
    return {'DOJANGO': dojango_config.get_context_dict()}

182
ntuh/dojango/data/__init__.py Executable file
View file

@ -0,0 +1,182 @@
import re
__all__ = ('QueryInfo', 'QueryReadStoreInfo',
'JsonRestStoreInfo', 'JsonQueryRestStoreInfo',)
class QueryInfoFeatures(object):
    """Feature flags describing what a QueryInfo implementation supports."""
    sorting = True
    paging = False
class QueryInfo(object):
    '''Extracts paging/sorting/filter information from a request.

    Usage (is that the right solution?):
        info = QueryInfo(request)
        info.extract()
        queryset = info.process(Object.objects.all())
    '''
    # Class-level defaults; mutable containers get fresh per-instance copies
    # in __init__ (see below).
    start = 0
    end = 25
    filters = {}
    sorting = []  # order_by()-style names; leading '-' means descending
    request = None
    max_count = 25

    def __init__(self, request, max_count=None, **kwargs):
        self.request = request
        # BUG FIX: filters/sorting were only class attributes, so every
        # instance (i.e. every request) shared and accumulated into the same
        # dict/list. Give each instance its own containers.
        self.filters = {}
        self.sorting = []
        if max_count is not None:
            self.max_count = max_count

    def extract(self):
        """Populate paging, sorting and filters from the request."""
        self.set_paging()
        self.set_sorting()
        self.set_filters()

    def set_paging(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_sorting(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_filters(self):
        """Needs to be implemented in a subclass"""
        pass

    def process(self, queryset):
        """Apply filters, ordering and the page slice to a queryset."""
        # maybe using Django's paginator
        return queryset.filter(**self.filters).order_by(*self.sorting)[self.start:self.end]
class QueryReadStoreInfo(QueryInfo):
    """
    A helper to evaluate a request from a dojox.data.QueryReadStore
    and extract the following information from it:

    - paging
    - sorting
    - filters

    Parameters could be passed within GET or POST.
    """
    def set_paging(self):
        # NOTE(review): self.request[self.request.method] indexes the request
        # object by the method name ('GET'/'POST') -- confirm the request
        # class passed in actually supports this lookup.
        params = self.request[self.request.method]
        start = params.pop('start', 0)
        count = params.pop('count', 25)
        # BUG FIX: the original called an undefined is_number() helper
        # (NameError at runtime). The dojo combobox may return "Infinity" as
        # count, so fall back to max_count for anything non-numeric/too big.
        try:
            count = int(count)
        except (TypeError, ValueError):
            count = self.max_count
        if count > self.max_count:
            count = self.max_count
        self.start = int(start)
        self.end = self.start + count

    def set_sorting(self):
        # REQUEST['sort']:
        # value: -sort_field (descending) / sort_field (ascending)
        sort_attr = self.request[self.request.method].pop('sort', None)
        if sort_attr:
            self.sorting.append(sort_attr)

    def set_filters(self):
        # BUG FIX: the collected parameters were built into a local dict and
        # then discarded; store them so process() can filter the queryset.
        query_dict = {}
        for k, v in self.request[self.request.method].items():
            query_dict[k] = v
        self.filters = query_dict
class JsonRestStoreInfo(QueryReadStoreInfo):
    """
    A helper to evaluate a request from a dojox.data.JsonRestStoreInfo
    and extract the following information:

    - paging
    - filters

    The paging parameter is passed within the request header "Range".
    Filters are passed via GET (equal to QueryReadStoreInfo).
    Sorting is just possible with JsonQueryReadStoreInfo.
    """
    def set_paging(self):
        # Receiving the following header:
        #   Range: items=0-24
        # Returning: Content-Range: items 0-24/66
        # BUG FIX: the headers live on the request object, not on this class
        # (self.META raised AttributeError).
        # NOTE(review): Django normally exposes headers as META['HTTP_RANGE'];
        # the original 'RANGE' key is kept -- confirm against the caller.
        meta = self.request.META
        if 'RANGE' in meta:
            regexp = re.compile(r"^\s*items=(\d+)-(\d+)", re.I)
            match = regexp.match(meta['RANGE'])
            if match:
                start, end = match.groups()
                start, end = int(start), int(end)+1 # range-end means including that element!
                self.start = start
                count = self.max_count
                if end-start < self.max_count:
                    count = end-start
                self.end = start+count

    def set_sorting(self):
        # sorting is not available in the normal JsonRestStore
        pass
class JsonQueryRestStoreInfo(QueryInfo):
    # Raw jsonpath query and its three optional parts, parsed in __init__.
    jsonpath = None
    jsonpath_filters = None
    jsonpath_sorting = None
    jsonpath_paging = None

    def __init__(self, request, **kwargs):
        """
        Matching the following example jsonpath:

        /path/[?(@.field1='searchterm*'&@.field2='*search*')][/@['field1'],/@['field2']][0:24]

        The last part of the URL will contain a JSONPath-query:

        [filter][sort][start:end:step]
        """
        path = request.path
        if not path.endswith("/"):
            path = path + "/"
        # assuming that at least one /path/ will be before the jsonpath query
        # and that the character [ initiates and ] ends the jsonpath
        match = re.match(r'^/.*/(\[.*\])/$', path)
        if match:
            self.jsonpath = match.groups()[0]
        if self.jsonpath:
            # remove the starting [ and ending ] and split on ][
            parts = self.jsonpath[1:-1].split("][")
            for part in parts:
                if part.startswith("?"):
                    self.jsonpath_filters = part
                elif re.match(r'^[/\\].*$', part):
                    self.jsonpath_sorting = part
                # [start:end:step]
                elif re.match(r'^\d*:\d*:{0,1}\d*$', part):
                    self.jsonpath_paging = part
        super(JsonQueryRestStoreInfo, self).__init__(request, **kwargs)

    def set_paging(self):
        # handling 0:24
        # Robustness: the URL may carry no paging part at all.
        if not self.jsonpath_paging:
            return
        match = re.match(r'^(\d*):(\d*):{0,1}\d*$', self.jsonpath_paging)
        if match:
            start, end = match.groups()
            # BUG FIX: the original tested start.length / end.length -- a
            # JavaScript-ism that raises AttributeError on Python strings.
            if len(start) == 0:
                start = 0
            if len(end) == 0:
                end = int(start) + self.max_count
            start, end = int(start), int(end)+1 # second argument means the element should be included!
            self.start = start
            count = self.max_count
            if end-start < self.max_count:
                count = end-start
            self.end = start+count

    def set_sorting(self):
        # handling /@['field1'],/@['field2']
        # Robustness: the URL may carry no sorting part at all.
        if not self.jsonpath_sorting:
            return
        for f in self.jsonpath_sorting.split(",/"):
            m = re.match(r"([\\/])@\['(.*)'\]", f)
            if m:
                direction, field = m.groups()
                # BUG FIX: the original assigned an unused 'descending'
                # variable, so every field was sorted descending; a leading
                # '/' means ascending (no '-' prefix).
                sort_prefix = "-"
                if direction == "/":
                    sort_prefix = ""
                self.sorting.append(sort_prefix + field)

    def set_filters(self):
        # handling ?(@.field1='searchterm*'&@.field2~'*search*')
        pass

View file

@ -0,0 +1,29 @@
""" Django ModelStore
"""
from stores import Store, ModelQueryStore
from methods import Method, ModelMethod, \
ObjectMethod, StoreMethod, FieldMethod, ValueMethod, \
RequestArg, ModelArg, ObjectArg, FieldArg, StoreArg
from fields import StoreField, ReferenceField
from services import BaseService, JsonService, servicemethod
from utils import get_object_from_identifier
__all__ = (
'Store', 'ModelQueryStore',
'Method', 'ModelMethod', 'ObjectMethod', 'StoreMethod',
'FieldMethod', 'ValueMethod',
'RequestArg', 'ModelArg', 'ObjectArg', 'FieldArg', 'StoreArg',
'StoreField', 'ReferenceField',
'BaseService', 'JsonService', 'servicemethod',
'get_object_from_identifier'
)

View file

@ -0,0 +1,27 @@
""" Django ModelStore exception classes
"""
__all__ = ('MethodException', 'FieldException',
'StoreException', 'ServiceException')
class MethodException(Exception):
    """ Raised when an error occurs related to a custom
        method (Method, ObjectMethod, etc.) call
    """
    pass
class FieldException(Exception):
    """ Raised when an error occurs related to a custom
        StoreField definition
    """
    pass
class StoreException(Exception):
    """ Raised when an error occurs related to a
        Store definition
    """
class ServiceException(Exception):
    """ Raised when an error occurs related to a custom
        Service definition or servicemethod call
    """

View file

@ -0,0 +1,197 @@
import utils
from exceptions import FieldException
import methods
# BUG FIX: a missing comma after 'StoreField' made Python concatenate the two
# adjacent string literals into the single bogus name 'StoreFieldReferenceField'.
__all__ = ('FieldException', 'StoreField',
    'ReferenceField', 'DojoDateField')
class StoreField(object):
    """ The base StoreField from which all ```StoreField```s derive
    """
    def __init__(self, model_field=None, store_field=None, get_value=None, sort_field=None, can_sort=True):
        """ A StoreField corresponding to a field on a model.

        Arguments (all optional):

            model_field
                The name of the field on the model. If omitted then
                it's assumed to be the attribute name given to this StoreField
                in the Store definition.

                Example:
                >>> class MyStore(Store):
                >>>     field_1 = StoreField() # The model_field will be Model.field_1
                >>>     field_2 = StoreField('my_field') # The model_field will be Model.my_field

            store_field
                The name of the field in the final store. If omitted then
                it will be the attribute name given to this StoreField in the
                Store definition.

                Example:
                >>> class MyStore(Store):
                >>>     field_1 = StoreField() # The store_field will be 'field_1'
                >>>     field_2 = StoreField(store_field='my_store_field')

            get_value
                An instance of modelstore.methods.BaseMethod (or any callable)
                used to get the final value from the field (or anywhere) that
                will go in the store.

                Example:
                    def get_custom_value():
                        return 'my custom value'
                >>> class MyStore(Store):
                        # get_custom_value will be called with no arguments
                >>>     field_1 = StoreField(get_value=get_custom_value)
                        # Wrap your method in an instance of methods.BaseMethod if you want to pass
                        # custom arguments -- see methods.BaseMethod (and it's derivatives) for full docs.
                >>>     field_2 = StoreField(get_value=Method(get_custom_value, arg1, arg2, arg3))

            sort_field
                Denotes the string used with QuerySet.order_by() to sort the objects
                by this field. Either the value passed to 'order_by()' on Django
                QuerySets or an instance of modelstore.methods.BaseMethod
                (or any callable) which returns the value. Requests to sort
                descending are handled automatically by prepending the sort field
                with '-'

                Example:
                >>> class MyStore(Store):
                        # QuerySet.order_by() will be called like: QuerySet.order_by('my_model_field')
                >>>     field_1 = StoreField('my_model_field')
                        # Sorting by dotted fields.
                >>>     field_2 = StoreField('my.dotted.field', sort_field='my__dotted__field')

            can_sort
                Whether or not this field can be order_by()'d -- Default is True.
                If this is False, then attempts to sort by this field will be ignored.
        """
        self._model_field_name = model_field
        self._store_field_name = store_field
        self._store_attr_name = None # We don't know this yet
        self.can_sort = can_sort
        self._sort_field = sort_field
        self._get_value = get_value
        # Attach a reference to this field to the get_value method
        # so it can access proxied_args
        if self._get_value:
            setattr(self._get_value, 'field', self)
        # Proxied arguments (ie, RequestArg, ObjectArg etc.)
        self.proxied_args = {}
    def _get_sort_field(self):
        """ Return the name of the field to be passed to
            QuerySet.order_by().

            Either the name of the value passed to 'order_by()' on Django
            QuerySets or some method which returns the value.
        """
        # Strings (and None) are returned as-is; anything else is assumed
        # to be a callable producing the order_by() argument.
        if (self._sort_field is None) or isinstance(self._sort_field, (str, unicode) ):
            return self._sort_field
        else:
            return self._sort_field()
    sort_field = property(_get_sort_field)
    def _get_store_field_name(self):
        """ Return the name of the field in the final store.

            If an explicit store_field is given in the constructor then that is
            used, otherwise it's the attribute name given to this field in the
            Store definition.
        """
        return self._store_field_name or self._store_attr_name
    store_field_name = property(_get_store_field_name)
    def _get_model_field_name(self):
        """ Return the name of the field on the Model that this field
            corresponds to.

            If an explicit model_field (the first arg) is given in the constructor
            then that is used, otherwise it's assumed to be the attribute name
            given to this field in the Store definition.
        """
        return self._model_field_name or self._store_attr_name
    model_field_name = property(_get_model_field_name)
    def get_value(self):
        """ Returns the value for this field
        """
        # Lazily default to reading the model attribute of the same name.
        if not self._get_value:
            self._get_value = methods.ObjectMethod(self.model_field_name)
            self._get_value.field = self
        return self._get_value()
class ReferenceField(StoreField):
    """ A StoreField that handles '_reference' items

        Corresponds to model fields that refer to other models,
        ie, ForeignKey, ManyToManyField etc.
    """
    def get_value(self):
        """ Return the reference structure(s) for the related object(s):
            a dict {'_reference': '<item identifier>'} for a single relation,
            or a list of such dicts for a to-many relation.
        """
        # The Store we're attached to.
        store = self.proxied_args['StoreArg']
        if not self._get_value:
            self._get_value = methods.ObjectMethod(self.model_field_name)
            self._get_value.field = self
        related = self._get_value()
        if not related:
            return []
        # A single model instance (ie from ForeignKey)?
        if hasattr(related, '_get_pk_val'):
            return {'_reference': store.get_identifier(related)}
        # Django QuerySet or Manager.
        if hasattr(related, 'iterator'):
            related = related.iterator()
        try:
            return [{'_reference': store.get_identifier(obj)} for obj in related]
        except TypeError:
            raise FieldException('Cannot iterate on field "%s"' % (
                self.model_field_name
            ))
###
# Pre-built custom Fields
###
class DojoDateField(StoreField):
    # Serializes a date/datetime model attribute via methods.DojoDateMethod
    # (strftime '%Y-%m-%dT%H:%M:%S').
    def get_value(self):
        # NOTE(review): unconditionally replaces any get_value passed to the
        # constructor -- presumably intentional for this pre-built field.
        self._get_value = methods.DojoDateMethod
        self._get_value.field = self
        return self._get_value()

View file

@ -0,0 +1,301 @@
import utils
from exceptions import MethodException
class Arg(object):
    """ The base placeholder argument class

        There is no reason to use this class directly and it really
        only exists so classes that inherit from it can be type checked.
    """
    pass
class RequestArg(Arg):
    """ Placeholder argument that represents the current
        Request object.
    """
    pass
class ModelArg(Arg):
    """ Placeholder argument that represents the current
        Model class.

        >>> user = User.objects.get(pk=1)

        In this case 'user' is the ObjectArg
        and 'User' is the ModelArg.
    """
    pass
class ObjectArg(Arg):
    """ Placeholder argument that represents the current
        Model object instance.

        user = User.objects.get(pk=1)
        'user' is the ObjectArg, 'User' is the ModelArg
    """
    pass
class StoreArg(Arg):
    """ Placeholder argument that represents the current
        Store instance.
    """
    pass
class FieldArg(Arg):
    """ Placeholder argument that represents the current
        Field instance.

        This is the field specified on the Store object,
        not the Model object.
    """
    pass
class BaseMethod(object):
    """ The base class from which all proxied methods
        derive.
    """
    def __init__(self, method_or_methodname, *args, **kwargs):
        """ The first argument is either the name of a method
            or the method object itself (ie, pointer to the method)

            The remaining arguments are passed to the given method
            substituting any proxied arguments as needed.

            Usage:
            >>> method = Method('my_method', RequestArg, ObjectArg, 'my other arg', my_kwarg='Something')
            >>> method()
            'My Result'

            The method call looks like:
            >>> my_method(request, model_instance, 'my_other_arg', my_kwarg='Something')
        """
        self.method_or_methodname = method_or_methodname
        self.args = args
        self.kwargs = kwargs
        self.field = None # Don't have a handle on the field yet
    def __call__(self):
        """ Builds the arguments and returns the value of the method call
        """
        self._build_args()
        return self.get_value()
    def _build_args(self):
        """ Builds the arguments to be passed to the given method.

            Substitutes placeholder args (ie RequestArg, ObjectArg etc.)
            with the actual objects found in self.field.proxied_args.
        """
        args = []
        for arg in self.args:
            try:
                arg = self.field.proxied_args.get(arg.__name__, arg)
            except AttributeError: # No __name__ attr on the arg
                pass
            args.append(arg)
        self.args = args
        for key, val in self.kwargs.items():
            self.kwargs.update({
                key: self.field.proxied_args.get(hasattr(val, '__name__') and val.__name__ or val, val)
            })
    def get_value(self):
        """ Calls the given method with the requested arguments.
        """
        raise NotImplementedError('get_value() not implemented in BaseMethod')
    def get_method(self, obj=None):
        """ Resolves the given method into a callable object.

            If 'obj' is provided, the method will be looked for as an
            attribute of the 'obj'. Supports dotted names.

            Usage:
            >>> method = Method('obj.obj.method', RequestArg)
            >>> method()
            'Result of method called with: obj.obj.method(request)'

            Dotted attributes are most useful when using something like an
            an ObjectMethod:
            (where 'user' is an instance of Django's 'User' model,
            the Object in this example is the 'user' instance)

            >>> method = ObjectMethod('date_joined.strftime', '%Y-%m-%d %H:%M:%S')
            >>> method()
            2009-10-02 09:58:39

            The actual method call looks like:
            >>> user.date_joined.strftime('%Y-%m-%d %H:%M:%S')

            It also supports attributes which are not actually methods:
            >>> method = ObjectMethod('first_name', 'ignored arguments', ...) # Arguments to a non-callable are ignored.
            >>> method()
            u'Bilbo'
            >>> method = ValueMethod('first_name', 'upper') # Called on the returned value
            >>> method()
            u'BILBO'
        """
        if callable(self.method_or_methodname):
            return self.method_or_methodname
        if not isinstance(self.method_or_methodname, (str, unicode) ):
            # BUG FIX: error message previously read 'Method must a string'.
            raise MethodException('Method must be a string or callable')
        if obj is not None:
            try:
                method = utils.resolve_dotted_attribute(obj, self.method_or_methodname)
            except AttributeError:
                raise MethodException('Cannot resolve method "%s" in object "%s"' % (
                    self.method_or_methodname, type(obj)
                ))
            if not callable(method):
                # Turn the plain attribute value into a callable that
                # ignores its arguments and returns the value.
                m = method
                def _m(*args, **kwargs): return m
                method = _m
            return method
        try:
            # SECURITY NOTE: eval() on the method name resolves it in the
            # current scope -- never pass untrusted input as a method name.
            return eval(self.method_or_methodname) # Just try to get it in current scope
        except NameError:
            raise MethodException('Cannot resolve method "%s"' % self.method_or_methodname)
class Method(BaseMethod):
    """ Basic method proxy class.

        Usage:
        >>> method = Method('my_global_method')
        >>> result = method()

        >>> method = Method(my_method, RequestArg, ObjectArg)
        >>> result = method()

        The real method call would look like:
        >>> my_method(request, model_object)

        Notes:
            If the method passed is the string name of a method, it is
            resolved via eval() in the current scope; MethodException is
            raised when the name cannot be resolved.
    """
    def get_value(self):
        target = self.get_method()
        return target(*self.args, **self.kwargs)
class ModelMethod(BaseMethod):
    """ A method proxy that resolves the given method as an
    attribute of the Model class (via the 'ModelArg' proxied arg).
    """
    def get_value(self):
        """ Resolve the method against the Model and call it. """
        model = self.field.proxied_args['ModelArg']
        resolved = self.get_method(model)
        return resolved(*self.args, **self.kwargs)
class ObjectMethod(BaseMethod):
    """ A method proxy that resolves the given method as an
    attribute of the Model *instance* (the 'ObjectArg' proxied arg).

    Example:
        >>> method = ObjectMethod('get_full_name')
        >>> method()
        u'Bilbo Baggins'

        Assuming this is used on an instance of Django's 'User' model,
        the method call looks like:
        >>> user.get_full_name()
    """
    def get_value(self):
        """ Resolve the method against the instance and call it. """
        instance = self.field.proxied_args['ObjectArg']
        resolved = self.get_method(instance)
        return resolved(*self.args, **self.kwargs)
class StoreMethod(BaseMethod):
    """ A method proxy that resolves the given method as an
    attribute of the Store (the 'StoreArg' proxied arg).
    """
    def get_value(self):
        """ Resolve the method against the Store and call it. """
        store = self.field.proxied_args['StoreArg']
        resolved = self.get_method(store)
        return resolved(*self.args, **self.kwargs)
class FieldMethod(BaseMethod):
    """ A method proxy that resolves the given method as an
    attribute of the Field (the 'FieldArg' proxied arg).

    Notes:
        Field is the field on the Store, not the Model.
    """
    def get_value(self):
        """ Resolve the method against the Field and call it. """
        store_field = self.field.proxied_args['FieldArg']
        resolved = self.get_method(store_field)
        return resolved(*self.args, **self.kwargs)
class ValueMethod(BaseMethod):
    """ A method proxy that resolves the given method as an
    attribute on the *value* of a field.

    Usage:
        >>> user = User.objects.get(pk=1)
        >>> user.date_joined
        datetime.datetime(..)
        >>>

        A ValueMethod would look for the given method on
        the datetime object:

        >>> method = ValueMethod('strftime', '%Y-%m-%d %H:%M:%S')
        >>> method()
        u'2009-10-02 12:32:12'
        >>>
    """
    def get_value(self):
        """ Fetch the field's value from the instance, then resolve
        and call the proxied method on that value. A None value
        short-circuits to None instead of raising MethodException.
        """
        instance = self.field.proxied_args['ObjectArg']
        value = utils.resolve_dotted_attribute(instance, self.field.model_field_name)
        if value is None:
            # Prevent throwing a MethodException if the value is None
            return None
        resolved = self.get_method(value)
        return resolved(*self.args, **self.kwargs)
###
# Pre-built custom Methods
###
# Formats a datetime field value as ISO-8601 ('%Y-%m-%dT%H:%M:%S'),
# the wire format Dojo's date handling (dojo.date.stamp) consumes.
DojoDateMethod = ValueMethod('strftime', '%Y-%m-%dT%H:%M:%S')

View file

@ -0,0 +1,266 @@
import sys, inspect
from django.utils import simplejson
from exceptions import ServiceException
def servicemethod(*args, **kwargs):
    """ The Service method decorator.

    Decorate a function or method to expose it remotely
    via RPC (or other mechanism.)

    Arguments:

        name (optional):
            The name of this method as seen remotely.

        store (required if not decorating a bound Store method):
            A reference to the Store this method operates on.
            This is required if the method is a regular function,
            a staticmethod or otherwise defined outside a Store instance.
            (ie doesn't take a 'self' argument)

        store_arg (optional):
            Specifies whether this method should be passed the Store instance
            as the first argument (default is True so that servicemethods bound to
            a store instance can get a proper 'self' reference.)

        request_arg (optional):
            Specifies whether this method should be passed a reference to the current
            Request object. (Default is True)

            If both store_arg and request_arg are True, the store will be passed first,
            then the request (to appease bound store methods that need a 'self' as the
            first arg). If only one is True then that one will be passed first. This is
            useful for using standard Django view functions as servicemethods since
            they require the 'request' as the first argument.
    """
    # Default options
    options = {'name': None, 'store': None, 'request_arg': True, 'store_arg': True}

    # Bare decoration (@servicemethod with no parens): the single
    # positional argument is the pre-decorated function itself.
    decorated = None
    if len(args) == 1 and callable(args[0]):
        decorated = args[0]

    if decorated is not None:
        options['name'] = decorated.__name__
        decorated.__servicemethod__ = options
        return decorated

    # Called with arguments (or as @servicemethod() ) -- the name may be
    # the first non-kwarg or the 'name' kwarg; the store may be the second
    # non-kwarg or the 'store' kwarg.
    # Example: @servicemethod('my_method', ...) or @servicemethod(name='my_method')
    if args and args[0]:
        options['name'] = args[0]
    else:
        options['name'] = kwargs.pop('name', None)
    if len(args) >= 2 and args[1]:
        options['store'] = args[1]
    else:
        options['store'] = kwargs.pop('store', None)
    options['request_arg'] = kwargs.pop('request_arg', True)
    options['store_arg'] = kwargs.pop('store_arg', True)

    def method_with_args_wrapper(method):
        """ Wrapper for a method decorated with decorator arguments """
        if options['name'] is None:
            options['name'] = method.__name__
        method.__servicemethod__ = options
        if options['store'] is not None:
            options['store'].service.add_method(method)
        return method

    return method_with_args_wrapper
class BaseService(object):
    """ The base Service class: keeps a registry of servicemethods
    and knows how to describe them to remote callers.
    """
    def __init__(self):
        """ Start with an empty method registry and no bound store. """
        self.methods = {}
        self._store = None

    def _get_store(self):
        """ Property getter: the store this service is bound to. """
        return self._store

    def _set_store(self, store):
        """ Property setter: bind the service to a store and update
        the store reference recorded in the __servicemethod__ dict of
        every registered servicemethod.
        """
        for registered in self.methods.values():
            registered.__servicemethod__['store'] = store
        self._store = store

    store = property(_get_store, _set_store)

    def _get_method_args(self, method, request, params):
        """ Prepend the store and/or request to the params list,
        according to the method's __servicemethod__ flags
        (store first, then request).
        """
        opts = method.__servicemethod__
        position = 0
        if opts['store_arg']:
            params.insert(position, opts['store'])
            position += 1
        if opts['request_arg']:
            params.insert(position, request)
        return params

    def add_method(self, method, name=None, request_arg=True, store_arg=True):
        """ Register a method as a servicemethod on this service.
        Decorated servicemethods keep their existing options.
        """
        if not hasattr(method, '__servicemethod__'):
            method.__servicemethod__ = {
                'name': name or method.__name__,
                'store': self.store,
                'request_arg': request_arg,
                'store_arg': store_arg,
            }
        self.methods[method.__servicemethod__['name']] = method

    def get_method(self, name):
        """ Return the servicemethod registered under `name`,
        raising ServiceException for unknown names.
        """
        if name not in self.methods:
            raise ServiceException('Service method "%s" not registered' % name)
        return self.methods[name]

    def list_methods(self):
        """ Return the names of all registered servicemethods. """
        return self.methods.keys()

    def process_request(self, request):
        """ Process a request object -- the entry point for all
        servicemethod calls. Subclasses must implement this.
        """
        raise NotImplementedError('process_request not implemented in BaseService')

    def process_response(self, id, result):
        """ Prepare a response from a servicemethod call.
        Subclasses must implement this.
        """
        raise NotImplementedError('process_response not implemented in BaseService')

    def process_error(self, id, code, error):
        """ Prepare an error response from a servicemethod call.
        Subclasses must implement this.
        """
        raise NotImplementedError('process_error not implemented in BaseService')

    def get_smd(self, url):
        """ Return a service method description of all public
        servicemethods. Subclasses must implement this.
        """
        raise NotImplementedError('get_smd not implemented in BaseService')
class JsonService(BaseService):
    """ Implements a JSON-RPC version 1.1 service
    """
    def __call__(self, request):
        """ Dispatch an HTTP request:
            JSON-RPC method calls come in as POSTs --
            Requests for the SMD come in as GETs
        """
        if request.method == 'POST':
            response = self.process_request(request)
        else:
            response = self.get_smd(request.get_full_path())
        return simplejson.dumps(response)
    def process_request(self, request):
        """ Decode the JSON-RPC request body, resolve the requested
            servicemethod, call it, and return a response dict.
        """
        try:
            data = simplejson.loads(request.raw_post_data)
            # Renamed local from 'id' to avoid shadowing the builtin.
            call_id, method_name, params = data["id"], data["method"], data["params"]
        # Doing a blanket except here because God knows what kind of crazy
        # POST data might come in.
        except:
            return self.process_error(0, 100, 'Invalid JSON-RPC request')
        try:
            method = self.get_method(method_name)
        except ServiceException:
            return self.process_error(call_id, 100, 'Unknown method: "%s"' % method_name)
        params = self._get_method_args(method, request, params)
        try:
            result = method(*params)
            return self.process_response(call_id, result)
        except BaseException:
            # Renamed local from 'eval' to avoid shadowing the builtin.
            etype, exc_value, etb = sys.exc_info()
            return self.process_error(call_id, 100, '%s: %s' % (etype.__name__, exc_value) )
        except:
            # Catches Python 2 old-style (string) exceptions, which do
            # not derive from BaseException.
            etype, exc_value, etb = sys.exc_info()
            return self.process_error(call_id, 100, 'Exception %s: %s' % (etype, exc_value) )
    def process_response(self, id, result):
        """ Build a JSON-RPC 1.1 response dict
        """
        return {
            'version': '1.1',
            'id': id,
            'result': result,
            'error': None,
        }
    def process_error(self, id, code, error):
        """ Build a JSON-RPC 1.1 error dict
        """
        return {
            'id': id,
            'version': '1.1',
            'error': {
                'name': 'JSONRPCError',
                'code': code,
                'message': error,
            },
        }
    def get_smd(self, url):
        """ Generate a JSON-RPC 1.1 Service Method Description (SMD)
        """
        smd = {
            'serviceType': 'JSON-RPC',
            'serviceURL': url,
            'methods': []
        }
        for name, method in self.methods.items():
            # Figure out what params to report --
            # we don't want to report the 'store' and 'request'
            # params to the remote method.
            skip = 0
            skip += method.__servicemethod__['store_arg'] and 1 or 0
            skip += method.__servicemethod__['request_arg'] and 1 or 0
            sig = inspect.getargspec(method)
            smd['methods'].append({
                'name': name,
                'parameters': [ {'name': val} for val in sig.args[skip:] ]
            })
        return smd

View file

@ -0,0 +1,454 @@
from django.utils import simplejson
from django.utils.encoding import smart_unicode
from django.core.paginator import Paginator
from utils import get_fields_and_servicemethods
from exceptions import StoreException, ServiceException
from services import JsonService, servicemethod
__all__ = ('Store', 'ModelQueryStore')
class StoreMetaclass(type):
    """ Collects declared StoreFields and servicemethods into class
    attributes ('fields' and 'servicemethods').

    (Mostly) adapted from django/forms/forms.py -- see the original
    'DeclarativeFieldsMetaclass' for doc and comments.
    """
    def __new__(cls, name, bases, attrs):
        # Get the declared StoreFields and service methods
        declared_fields, declared_methods = get_fields_and_servicemethods(bases, attrs)
        attrs['servicemethods'] = declared_methods
        # Tell each field the name of the attribute it was declared
        # under in the Store
        for attr_name, field in declared_fields.items():
            setattr(field, '_store_attr_name', attr_name)
        attrs['fields'] = declared_fields
        return super(StoreMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseStore(object):
    """ The base Store from which all Stores derive
    """
    class Meta(object):
        """ Inner class to hold store options.
            Same basic concept as Django's Meta class
            on Model definitions.
        """
        pass
    def __init__(self, objects=None, stores=None, identifier=None, label=None, is_nested=False):
        """ Store instance constructor.

            Arguments (all optional):

                objects:
                    The list (or any iterable, ie QuerySet) of objects that will
                    fill the store.

                stores:
                    One or more Store objects to combine together into a single
                    store. Useful when using ReferenceFields to build a store
                    with objects of more than one 'type' (like Django models
                    via ForeignKeys, ManyToManyFields etc.)

                identifier:
                    The 'identifier' attribute used in the store.

                label:
                    The 'label' attribute used in the store.

                is_nested:
                    This is required, if we want to return the items as direct
                    array and not as dictionary including
                    {'identifier': "id", 'label', ...}
                    It mainly is required, if children of a tree structure needs
                    to be rendered (see TreeStore).
        """
        # Instantiate the inner Meta class
        self._meta = self.Meta()
        # Move the fields into the _meta instance
        self.set_option('fields', self.fields)
        # Set the identifier (constructor arg wins over Meta; 'id' is the fallback)
        if identifier:
            self.set_option('identifier', identifier)
        elif not self.has_option('identifier'):
            self.set_option('identifier', 'id')
        # Set the label (constructor arg wins over Meta; 'label' is the fallback)
        if label:
            self.set_option('label', label)
        elif not self.has_option('label'):
            self.set_option('label', 'label')
        # Is this a nested store? (indicating that it should be rendered as array)
        self.is_nested = is_nested
        # Set the objects
        if objects != None:
            self.set_option('objects', objects)
        elif not self.has_option('objects'):
            self.set_option('objects', [])
        # Set the stores
        if stores:
            self.set_option('stores', stores)
        elif not self.has_option('stores'):
            self.set_option('stores', [])
        # Instantiate the stores (if required)
        self.set_option('stores', [ isinstance(s, Store) and s or s() for s in self.get_option('stores') ])
        # Do we have service set? (get_option raises StoreException when not)
        try:
            self.service = self.get_option('service')
            self.service.store = self
            # Populate all the declared servicemethods
            for method in self.servicemethods.values():
                self.service.add_method(method)
        except StoreException:
            self.service = None
        self.request = None # Placeholder for the Request object (if used)
        self.data = self.is_nested and [] or {} # The serialized data in it's final form
    def has_option(self, option):
        """ True/False whether the given option is set in the store
        """
        try:
            self.get_option(option)
        except StoreException:
            return False
        return True
    def get_option(self, option):
        """ Returns the given store option.
            Raises a StoreException if the option isn't set.
        """
        try:
            return getattr(self._meta, option)
        except AttributeError:
            raise StoreException('Option "%s" not set in store' % option)
    def set_option(self, option, value):
        """ Sets a store option.
        """
        setattr(self._meta, option, value)
    def __call__(self, request):
        """ Called when an instance of this store is called
            (ie as a Django 'view' function from a URLConf).

            It accepts the Request object as it's only param, which
            it makes available to other methods at 'self.request'.

            Returns the serialized store as Json (or dispatches a POST
            to the attached RPC service, when one is configured).
        """
        self.request = request
        if self.service:
            self._merge_servicemethods()
            if not self.is_nested:
                self.data['SMD'] = self.service.get_smd( request.get_full_path() )
            if request.method == 'POST':
                return self.service(request)
        return self.to_json()
    def __str__(self):
        """ Renders the store as Json.
        """
        return self.to_json()
    def __repr__(self):
        """ Debug representation: class name, identifier, label and
            object count.
        """
        # NOTE(review): the getattr default is the *string* '__len__',
        # which is not callable; and for plain lists 'count' resolves to
        # list.count (requires an argument). This repr appears to work
        # only for QuerySet-like objects exposing .count() -- confirm.
        count = getattr(self.get_option('objects'), 'count', '__len__')()
        return '<%s: identifier: %s, label: %s, objects: %d>' % (
            self.__class__.__name__, self.get_option('identifier'), self.get_option('label'), count)
    def get_identifier(self, obj):
        """ Returns a (theoretically) unique key for a given
            object of the form: <appname>.<modelname>__<pk>
        """
        return smart_unicode('%s__%s' % (
            obj._meta,
            obj._get_pk_val(),
        ), strings_only=True)
    def get_label(self, obj):
        """ Calls the object's __unicode__ method
            to get the label if available or just returns
            the identifier.
        """
        try:
            return obj.__unicode__()
        except AttributeError:
            return self.get_identifier(obj)
    def _merge_servicemethods(self):
        """ Merges the declared service methods from multiple
            stores into a single store. The store reference on each
            method will still point to the original store.

            Raises StoreException when two combined stores declare a
            servicemethod under the same name.
        """
        # only run if we have a service set
        if self.service:
            for store in self.get_option('stores'):
                if not store.service: # Ignore when no service is defined.
                    continue
                for name, method in store.service.methods.items():
                    try:
                        self.service.get_method(name)
                        # get_method succeeded => a name collision
                        raise StoreException('Combined stores have conflicting service method name "%s"' % name)
                    except ServiceException: # This is what we want
                        # Don't use service.add_method since we want the 'foreign' method to
                        # stay attached to the original store
                        self.service.methods[name] = method
    def _merge_stores(self):
        """ Merge all the stores into one.
        """
        for store in self.get_option('stores'):
            # The other stores will (temporarily) take on this store's 'identifier' and
            # 'label' settings
            orig_identifier = store.get_option('identifier')
            orig_label = store.get_option('label')
            for attr in ('identifier', 'label'):
                store.set_option(attr, self.get_option(attr))
            self.data['items'] += store.to_python()['items']
            # Reset the old values for label and identifier
            store.set_option('identifier', orig_identifier)
            store.set_option('label', orig_label)
    def add_store(self, *stores):
        """ Add one or more stores to this store.

            Arguments (required):

                stores:
                    One or many Stores (or Store instances) to add to this store.

            Usage:
                >>> store.add_store(MyStore1, MyStore2(), ...)
                >>>
        """
        # If a non-instance Store is given, instantiate it.
        stores = [ isinstance(s, Store) and s or s() for s in stores ]
        self.set_option('stores', list( self.get_option('stores') ) + stores )
    def to_python(self, objects=None):
        """ Serialize the store into a Python dictionary.

            Arguments (optional):

                objects:
                    The list (or any iterable, ie QuerySet) of objects that will
                    fill the store -- the previous 'objects' setting will be restored
                    after serialization is finished.
        """
        if objects is not None:
            # Save the previous objects setting
            old_objects = self.get_option('objects')
            self.set_option('objects', objects)
            self._serialize()
            self.set_option('objects', old_objects)
        else:
            self._serialize()
        return self.data
    def to_json(self, *args, **kwargs):
        """ Serialize the store as Json.

            Arguments (all optional):

                objects:
                    (The kwarg 'objects')
                    The list (or any iterable, ie QuerySet) of objects that will
                    fill the store.

            All other args and kwargs are passed to simplejson.dumps
        """
        objects = kwargs.pop('objects', None)
        return simplejson.dumps( self.to_python(objects), *args, **kwargs )
    def _start_serialization(self):
        """ Called when serialization of the store begins.
            Initializes self.data as a list (nested store) or a dict
            carrying 'identifier'/'label'/'items' keys.
        """
        if not self.is_nested:
            self.data['identifier'] = self.get_option('identifier')
        # Don't set a label field in the store if it's not wanted
        if bool( self.get_option('label') ) and not self.is_nested:
            self.data['label'] = self.get_option('label')
        if self.is_nested:
            self.data = []
        else:
            self.data['items'] = []
    def _start_object(self, obj):
        """ Called when starting to serialize each object in 'objects'
            Requires an object as the only argument.
        """
        # The current object in it's serialized state.
        self._item = {self.get_option('identifier'): self.get_identifier(obj)}
        label = self.get_option('label')
        # Do we have a 'label' and is it already the
        # name of one of the declared fields?
        if label and ( label not in self.get_option('fields').keys() ):
            # Have we defined a 'get_label' method on the store?
            if callable( getattr(self, 'get_label', None) ):
                self._item[label] = self.get_label(obj)
    def _handle_field(self, obj, field):
        """ Handle the given field in the Store: fill the field's
            proxied args and record its serialized value on the
            current item.
        """
        # Fill the proxied_args on the field (for get_value methods that use them)
        field.proxied_args.update({
            'RequestArg': self.request,
            'ObjectArg': obj,
            'ModelArg': obj.__class__,
            'FieldArg': field,
            'StoreArg': self,
        })
        # Get the value
        self._item[field.store_field_name] = field.get_value()
    def _end_object(self, obj):
        """ Called when serializing an object ends.
            Appends the finished item to self.data.
        """
        if self.is_nested:
            self.data.append(self._item)
        else:
            self.data['items'].append(self._item)
        self._item = None
    def _end_serialization(self):
        """ Called when serialization of the store ends
        """
        pass
    def _serialize(self):
        """ Serialize the defined objects and stores into it's final form
        """
        self._start_serialization()
        for obj in self.get_option('objects'):
            self._start_object(obj)
            for field in self.get_option('fields').values():
                self._handle_field(obj, field)
            self._end_object(obj)
        self._end_serialization()
        self._merge_stores()
class Store(BaseStore):
    """ Concrete store class: simply wires up StoreMetaclass.
    All the real functionality is implemented in BaseStore.
    """
    __metaclass__ = StoreMetaclass
class ModelQueryStore(Store):
    """ A store designed to be used with dojox.data.QueryReadStore

        Handles paging, sorting and filtering

        At the moment it requires a custom subclass of QueryReadStore
        that implements the necessary mechanics to handle server queries
        the the exported Json RPC 'fetch' method. Soon it will support
        QueryReadStore itself.
    """
    def __init__(self, *args, **kwargs):
        """ Same arguments as Store, plus the optional kwarg
            'objects_per_query': the maximum number of objects a single
            query may return (defaults to 25 when not set here or on Meta).
        """
        objects_per_query = kwargs.pop('objects_per_query', None)
        super(ModelQueryStore, self).__init__(*args, **kwargs)
        if objects_per_query is not None:
            self.set_option('objects_per_query', objects_per_query)
        elif not self.has_option('objects_per_query'):
            self.set_option('objects_per_query', 25)
    def filter_objects(self, request, objects, query):
        """ Overridable method used to filter the objects
            based on the query dict.
        """
        return objects
    def sort_objects(self, request, objects, sort_attr, descending):
        """ Overridable method used to sort the objects based
            on the attribute given by sort_attr
        """
        return objects
    def __call__(self, request):
        """ Entry point when used as a Django view: reads the paging and
            sorting parameters from request.GET and returns the
            serialized page as a Python dict (including 'numRows').
        """
        self.request = request
        # We need the request.GET QueryDict to be mutable.
        query_dict = {}
        for k, v in request.GET.items():
            query_dict[k] = v
        # dojox.data.QueryReadStore only handles sorting by a single field
        sort_attr = query_dict.pop('sort', None)
        descending = False
        if sort_attr and sort_attr.startswith('-'):
            descending = True
            sort_attr = sort_attr.lstrip('-')
        # Paginator is 1-indexed
        start_index = int( query_dict.pop('start', 0) ) + 1
        # Calculate the count taking objects_per_query into account
        objects_per_query = self.get_option('objects_per_query')
        count = query_dict.pop('count', objects_per_query)
        # BUGFIX: 'count' arrives as a *string* from request.GET, so the
        # clamping comparison below used to compare str to int -- which
        # silently clamped every client-supplied value on Python 2 and
        # would raise TypeError on Python 3. Coerce it to an int first
        # ('Infinity' is handled by the clamp itself).
        if count != 'Infinity':
            try:
                count = int(count)
            except (TypeError, ValueError):
                count = objects_per_query
        # We don't want the client to be able to ask for a million records.
        # They can ask for less, but not more ...
        if count == 'Infinity' or count > objects_per_query:
            count = objects_per_query
        objects = self.filter_objects(request, self.get_option('objects'), query_dict)
        objects = self.sort_objects(request, objects, sort_attr, descending)
        paginator = Paginator(objects, count)
        # Find the page containing the requested start index
        page_num = 1
        for i in xrange(1, paginator.num_pages + 1):
            if paginator.page(i).start_index() <= start_index <= paginator.page(i).end_index():
                page_num = i
                break
        page = paginator.page(page_num)
        data = self.to_python(objects=page.object_list)
        data['numRows'] = paginator.count
        return data

View file

@ -0,0 +1,44 @@
from stores import Store
from fields import StoreField
from methods import BaseMethod
class ChildrenMethod(BaseMethod):
    """ A method proxy that resolves the children of a model with a
    tree structure.

    "django-treebeard" and "django-mptt" both attach a get_children
    method to the model.
    """
    def get_value(self):
        """ Serialize the object's children (if any) through a nested
        instance of the same store class.
        """
        store = self.field.proxied_args['StoreArg']
        node = self.field.proxied_args['ObjectArg']
        # TODO: optimize using get_descendants()
        if not hasattr(node, "get_children"):
            return []
        nested = store.__class__(objects=node.get_children(), is_nested=True)
        return nested.to_python()
class ChildrenField(StoreField):
    """ A field that renders children items.

    If your model provides a get_children method you can use this field
    to render all children recursively.
    (see "django-treebeard", "django-mptt")
    """
    def get_value(self):
        """ Build a ChildrenMethod bound to this field and invoke it. """
        method = ChildrenMethod(self.model_field_name)
        method.field = self
        self._get_value = method
        return method()
class TreeStore(Store):
    """ A store that already includes the children field with no additional
        options.

        Just subclass this Store, add the to-be-rendered fields and
        attach a django-treebeard (or django-mptt) model to its Meta class:

            class MyStore(TreeStore):
                username = StoreField()
                first_name = StoreField()

                class Meta:
                    objects = YourTreeModel.objects.filter(id=1) # using treebeard or mptt
                    label = 'username'
    """
    # Recursively serializes each object's get_children() result
    children = ChildrenField()

View file

@ -0,0 +1,95 @@
from django.utils.datastructures import SortedDict
from django.db.models import get_model
from fields import StoreField
from exceptions import StoreException
def get_object_from_identifier(identifier, valid=None):
    """ Helper function to resolve an item identifier
    ('<app>.<model>__<pk>') into a model instance.

    Raises StoreException if the identifier is invalid
    or the requested Model could not be found

    Raises <Model>.DoesNotExist if the object lookup fails

    Arguments (optional):

        valid
            One or more Django model classes to compare the
            returned model instance to.
    """
    parts = identifier.split('__')
    if len(parts) != 2:
        raise StoreException('Invalid identifier string')
    model_str, pk = parts
    Model = get_model(*model_str.split('.'))
    if Model is None:
        raise StoreException('Model from identifier string "%s" not found' % model_str)
    if valid is not None:
        if not isinstance(valid, (list, tuple) ):
            valid = (valid,)
        if Model not in valid:
            raise StoreException('Model type mismatch')
    # This will raise Model.DoesNotExist if lookup fails
    return Model._default_manager.get(pk=pk)
def get_fields_and_servicemethods(bases, attrs, include_bases=True):
    """ This function was pilfered (and slightly modified) from django/forms/forms.py

        See the original function for doc and comments.

        Returns a (fields, servicemethods) pair of SortedDicts collected
        from the class namespace 'attrs' and (optionally) the 'bases'.
    """
    # Pull the declared StoreFields out of the class namespace.
    # NOTE: popping from 'attrs' while iterating attrs.items() is safe on
    # Python 2 because items() returns a list snapshot.
    fields = [ (field_name, attrs.pop(field_name)) for \
        field_name, obj in attrs.items() if isinstance(obj, StoreField)]
    # Get the method name directly from the __servicemethod__ dict
    # as set by the decorator
    methods = [ (method.__servicemethod__['name'], method) for \
        method in attrs.values() if hasattr(method, '__servicemethod__') ]
    if include_bases:
        # Walk bases in reverse so later (more derived) declarations win
        for base in bases[::-1]:
            # Grab the fields and servicemethods from the base classes
            # NOTE(review): 'dict.items() + list' is Python 2 only --
            # dict views don't support '+' on Python 3.
            try:
                fields = base.fields.items() + fields
            except AttributeError:
                pass
            try:
                methods = base.servicemethods.items() + methods
            except AttributeError:
                pass
    return SortedDict(fields), SortedDict(methods)
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """ resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walks a dotted attribute path, raising AttributeError for any
    path component that starts with '_' (the special '__unicode__'
    method is the one exception).

    If the optional allow_dotted_names argument is False, dots are not
    supported and this function operates similar to getattr(obj, attr).

    NOTE:
        This method was (mostly) copied straight over from
        SimpleXMLRPCServer.py in the standard library.
    """
    if allow_dotted_names:
        parts = attr.split('.')
    else:
        parts = [attr]
    for name in parts:
        # Refuse private attribute traversal, but allow the
        # __unicode__ method to be called
        if name.startswith('_') and name != '__unicode__':
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj

View file

View file

@ -0,0 +1,76 @@
from django.core.serializers.json import DateTimeAwareJSONEncoder
from django.db.models.query import QuerySet
from django.utils import simplejson as json
from piston.emitters import Emitter
from piston.validate_jsonp import is_valid_jsonp_callback_value
class DojoDataEmitter(Emitter):
    """
    This emitter is designed to render dojo.data.ItemFileReadStore compatible
    data.

    Requires your handler to expose the `id` field of your model, that Piston
    excludes in the default setting. The item's label is the unicode
    representation of your model unless it already has a field with the
    name `_unicode`.

    Optional GET variables:
        `callback`: JSONP callback
        `indent`: Number of spaces for JSON indentation

    If you serialize Django models and nest related models (which is a common
    case), make sure to set the `hierarchical` parameter of the
    ItemFileReadStore to false (which defaults to true).
    """
    def render(self, request):
        """
        Renders dojo.data compatible JSON if self.data is a QuerySet, falls
        back to standard JSON.
        """
        callback = request.GET.get('callback', None)
        try:
            indent = int(request.GET['indent'])
        except (KeyError, ValueError):
            indent = None
        data = self.construct()
        if isinstance(self.data, QuerySet):
            # Map pk -> unicode(obj) so each serialized item can be
            # labeled below. (Replaces the old side-effecting list
            # comprehension with a plain dict construction.)
            unicode_lookup_table = dict(
                (item.pk, unicode(item)) for item in self.data)
            for dict_item in data:
                try:
                    item_id = dict_item['id']
                except KeyError:
                    raise KeyError('The handler of the model that you want '\
                        'to emit as DojoData needs to expose the `id` field!')
                else:
                    dict_item.setdefault('_unicode', unicode_lookup_table[item_id])
            data = {
                'identifier': 'id',
                'items': data,
                'label': '_unicode',
                'numRows': self.data.count(),
            }
        serialized_data = json.dumps(data, ensure_ascii=False,
            cls=DateTimeAwareJSONEncoder, indent=indent)
        if callback and is_valid_jsonp_callback_value(callback):
            # Wrap in the JSONP callback when one was requested and valid
            return '%s(%s)' % (callback, serialized_data)
        return serialized_data
def register_emitters():
    """
    Registers the DojoDataEmitter with the name 'dojodata'.
    """
    Emitter.register(
        'dojodata', DojoDataEmitter, 'application/json; charset=utf-8')

146
ntuh/dojango/decorators.py Executable file
View file

@ -0,0 +1,146 @@
from django.http import HttpResponseNotAllowed, HttpResponseServerError
from django.utils import simplejson as json
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
    """Allow only POST requests to come in, throw an exception otherwise.

    This relieves from checking every time that the request is
    really a POST request, which it should be when using this
    decorator.
    """
    def _ret(*args, **kwargs):
        request = args[0]
        # BUGFIX: reject non-POST requests *before* running the view.
        # The original called the view first and discarded its result,
        # so view side effects executed even for disallowed methods.
        if not request.method == 'POST':
            return HttpResponseNotAllowed(['POST'])
        return func(*args, **kwargs)
    return _ret
def add_request_getdict(func):
    """Add the method getdict() to the request object.

    This works just like getlist() only that it decodes any nested
    JSON encoded object structure.

    Since sending deep nested structures is not possible via
    GET/POST by default, this enables it. Of course you need to
    make sure that on the JavaScript side you are also sending
    the data properly, which dojango.send() automatically does.

    Example:
        this is being sent:
            one:1
            two:{"three":3, "four":4}
        using
            request.POST.getdict('two')
        returns a dict containing the values sent by the JavaScript.
    """
    def _ret(*args, **kwargs):
        # Patch getdict onto the QueryDict class before running the view
        args[0].POST.__class__.getdict = __getdict
        return func(*args, **kwargs)
    return _ret
def __getdict(self, key):
    """getlist()-style accessor that JSON-decodes the stored value."""
    raw = self.get(key)
    try:
        return json.loads(raw)
    except ValueError:
        # The value was not JSON encoded :-)
        raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(raw)))
def json_response(func):
    """
    A simple json response decorator. Use it on views, where a python data object should be converted
    to a json response:

        @json_response
        def my_view(request):
            my_data = {'foo': 'bar'}
            return my_data
    """
    def inner(request, *args, **kwargs):
        result = func(request, *args, **kwargs)
        return __prepare_json_ret(request, result)
    return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
    """
    A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
    It acts like the json_response decorator but with the difference, that it
    wraps the returned json string into a client-specified function name (that is the Padding).

    You can add this decorator to a function like that:

        @jsonp_response_custom("my_callback_param")
        def my_view(request):
            my_data = {'foo': 'bar'}
            return my_data

    Your now can access this view from a foreign URL using JSONP.
    An example with Dojo looks like that:

        dojo.io.script.get({ url:"http://example.com/my_url/",
                             callbackParamName:"my_callback_param",
                             load: function(response){
                                 console.log(response);
                             }
        });

    Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
    """
    def decorator(func):
        def inner(request, *args, **kwargs):
            result = func(request, *args, **kwargs)
            return __prepare_json_ret(
                request, result, callback_param_name=callback_param_name)
        return wraps(func)(inner)
    return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
# BUGFIX: the docstring previously claimed 'jsoncallback'; the fixed
# callback_param_name actually passed above is 'jsonp_callback'.
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsonp_callback' as a fixed callback_param_name."
def json_iframe_response(func):
    """
    A simple json response decorator but wrapping the json response into a html page.
    It helps when doing a json request using an iframe (e.g. file up-/download):

        @json_iframe
        def my_view(request):
            my_data = {'foo': 'bar'}
            return my_data
    """
    def inner(request, *args, **kwargs):
        result = func(request, *args, **kwargs)
        return __prepare_json_ret(request, result, use_iframe=True)
    return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
    """ Normalize a view's return value and serialize it to a JSON
        (optionally JSONP- or iframe-wrapped) response.

        'ret' must be a dictionary (False becomes {'success': False},
        None becomes {}); a 'success' key is added when missing.
        Serialization failures are reported as HttpResponseServerError.
    """
    if ret==False:
        ret = {'success':False}
    elif ret==None: # Sometimes there is no return.
        ret = {}
    # Add the 'ret'=True, since it was obviously no set yet and we got valid data, no exception.
    func_name = None
    if callback_param_name:
        # NOTE(review): the fallback "callbackParamName" becomes the JSONP
        # function name whenever the GET parameter is missing -- that looks
        # like a placeholder rather than an intended default; confirm.
        func_name = request.GET.get(callback_param_name, "callbackParamName")
    try:
        if not ret.has_key('success'):
            ret['success'] = True
    except AttributeError, e:
        raise Exception("The returned data of your function must be a dictionary!")
    json_ret = ""
    try:
        # Sometimes the serialization fails, i.e. when there are too deeply nested objects or even classes inside
        json_ret = to_json_response(ret, func_name, use_iframe)
    except Exception, e:
        print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
        print ret
        print '\n\n'
        return HttpResponseServerError(content=str(e))
    return json_ret

Some files were not shown because too many files have changed in this diff Show more